import collections
import inspect
import unittest

from transformers import Swinv2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import Swinv2ForImageClassification, Swinv2ForMaskedImageModeling, Swinv2Model
    from transformers.models.swinv2.modeling_swinv2 import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class Swinv2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return Swinv2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = Swinv2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = Swinv2ForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = Swinv2ForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = Swinv2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class Swinv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (Swinv2Model, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": Swinv2Model, "image-classification": Swinv2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = Swinv2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Swinv2Config, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = Swinv2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )


@require_vision
@require_torch
class Swinv2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
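For orientation, here is a minimal usage sketch of the template above. It is not part of the original module and assumes a datasets version that still ships task templates; the two label names are made up for illustration.

from datasets import ClassLabel, Features, Image
from datasets.tasks import ImageClassification

features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
task = ImageClassification(image_column="image", label_column="labels")
aligned = task.align_with_features(features)
assert aligned.label_schema["labels"].names == ["cat", "dog"]  # schema now carries the dataset's labels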
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
SCREAMING_SNAKE_CASE = False
class __a ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __a ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : str )-> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : List[str] )-> Any:
"""simple docstring"""
UpperCamelCase = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
UpperCamelCase = "A painting of a squirrel eating a burger "
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = pipe(
prompt=UpperCAmelCase_ , generator=UpperCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase_ )
UpperCamelCase = VersatileDiffusionTextToImagePipeline.from_pretrained(UpperCAmelCase_ )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
UpperCamelCase = generator.manual_seed(0 )
UpperCamelCase = pipe(
prompt=UpperCAmelCase_ , generator=UpperCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def _SCREAMING_SNAKE_CASE ( self : int )-> List[str]:
"""simple docstring"""
UpperCamelCase = VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
UpperCamelCase = "A painting of a squirrel eating a burger "
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = pipe(
prompt=UpperCAmelCase_ , generator=UpperCAmelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
UpperCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class __a ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] )-> int:
"""simple docstring"""
UpperCamelCase = ["a", "b", "c"]
# Defaults to last layer if both are None
UpperCamelCase , UpperCamelCase = get_aligned_output_features_output_indices(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , ["c"] )
self.assertEqual(UpperCAmelCase_ , [2] )
# Out indices set to match out features
UpperCamelCase , UpperCamelCase = get_aligned_output_features_output_indices(["a", "c"] , UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , ["a", "c"] )
self.assertEqual(UpperCAmelCase_ , [0, 2] )
# Out features set to match out indices
UpperCamelCase , UpperCamelCase = get_aligned_output_features_output_indices(UpperCAmelCase_ , [0, 2] , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , ["a", "c"] )
self.assertEqual(UpperCAmelCase_ , [0, 2] )
# Out features selected from negative indices
UpperCamelCase , UpperCamelCase = get_aligned_output_features_output_indices(UpperCAmelCase_ , [-3, -1] , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , ["a", "c"] )
self.assertEqual(UpperCAmelCase_ , [-3, -1] )
def _SCREAMING_SNAKE_CASE ( self : int )-> Union[str, Any]:
"""simple docstring"""
# Stage names must be set
with self.assertRaises(UpperCAmelCase_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , UpperCAmelCase_ )
# Out features must be a list
with self.assertRaises(UpperCAmelCase_ ):
verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] )
# Out features must be a subset of stage names
with self.assertRaises(UpperCAmelCase_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] )
# Out indices must be a list or tuple
with self.assertRaises(UpperCAmelCase_ ):
verify_out_features_out_indices(UpperCAmelCase_ , 0 , ["a", "b"] )
# Out indices must be a subset of stage names
with self.assertRaises(UpperCAmelCase_ ):
verify_out_features_out_indices(UpperCAmelCase_ , (0, 1) , ["a"] )
# Out features and out indices must be the same length
with self.assertRaises(UpperCAmelCase_ ):
verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] )
# Out features should match out indices
with self.assertRaises(UpperCAmelCase_ ):
verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] )
# Out features and out indices should be in order
with self.assertRaises(UpperCAmelCase_ ):
verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] )
# Check passes with valid inputs
verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] )
def _SCREAMING_SNAKE_CASE ( self : Dict )-> List[Any]:
"""simple docstring"""
UpperCamelCase = BackboneMixin()
UpperCamelCase = ["a", "b", "c"]
UpperCamelCase = ["a", "c"]
UpperCamelCase = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
UpperCamelCase = ["a", "b"]
self.assertEqual(backbone.out_features , ["a", "b"] )
self.assertEqual(backbone.out_indices , [0, 1] )
UpperCamelCase = [-3, -1]
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [-3, -1] )
from argparse import ArgumentParser, Namespace

from ..utils import logging
from . import BaseTransformersCLICommand


def convert_command_factory(args: Namespace):
    """
    Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint.

    Returns: ConvertCommand
    """
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""


class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformers-cli.

        Args:
            parser: Root parser to register command-specific arguments
        """
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]"
            )
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt"
        ),
        "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
        "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-openqa": (
            "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-reader": (
            "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-openqa": (
            "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-reader": (
            "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/realm-cc-news-pretrained-embedder": 512,
    "google/realm-cc-news-pretrained-encoder": 512,
    "google/realm-cc-news-pretrained-scorer": 512,
    "google/realm-cc-news-pretrained-openqa": 512,
    "google/realm-orqa-nq-openqa": 512,
    "google/realm-orqa-nq-reader": 512,
    "google/realm-orqa-wq-openqa": 512,
    "google/realm-orqa-wq-reader": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-reader": {"do_lower_case": True},
    "google/realm-orqa-wq-openqa": {"do_lower_case": True},
    "google/realm-orqa-wq-reader": {"do_lower_case": True},
}


class RealmTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        # Always pad candidates to max_length so they stack into one tensor
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
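For orientation, a minimal usage sketch of `batch_encode_candidates` (illustrative; assumes the pretrained checkpoint is reachable). Each inner list is one set of retrieval candidates, and every candidate is padded to `max_length`, so the returned tensors have shape (batch_size, num_candidates, max_length).

from transformers import RealmTokenizerFast

tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
batch = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
encoded = tokenizer.batch_encode_candidates(batch, max_length=10, return_tensors="pt")
print(encoded["input_ids"].shape)  # torch.Size([2, 2, 10])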
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu


class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )

        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)


@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
from __future__ import annotations

import string
from itertools import cycle, product
from pathlib import Path

VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}

COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """XOR-decrypt the message with a candidate key; return None on any invalid character."""
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Try every 3-letter lowercase key and keep the decryptions made of valid characters."""
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(f"{solution() = }")
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape (k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
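As a quick cross-check (illustrative, not part of the original script), OpenCV's built-in GaussianBlur should roughly agree with the im2col implementation above: gen_gaussian_kernel is only approximately normalized and gaussian_filter performs a 'valid' convolution, so the comparison trims a one-pixel border for k_size=3 and only inspects the average discrepancy. It reuses the `gray` image loaded in the main block.

import numpy as np
from cv2 import GaussianBlur

# trim the 1-pixel border lost to the 'valid' convolution before comparing
reference = GaussianBlur(gray, (3, 3), sigmaX=1)[1:-1, 1:-1]
ours = gaussian_filter(gray, 3, sigma=1)
print("mean abs difference:", np.abs(reference.astype(float) - ours.astype(float)).mean())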
import unittest

import numpy as np

from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roformer.modeling_flax_roformer import (
        FlaxRoFormerForMaskedLM,
        FlaxRoFormerForMultipleChoice,
        FlaxRoFormerForQuestionAnswering,
        FlaxRoFormerForSequenceClassification,
        FlaxRoFormerForTokenClassification,
        FlaxRoFormerModel,
    )


class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )

        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__magic_name__ = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['ConvNextFeatureExtractor']
__magic_name__ = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
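# Illustrative effect of the lazy structure above: `import transformers` stays cheap,
# and e.g. `from transformers import ConvNextModel` only triggers the torch-side
# module import on first attribute access.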
| 27 | 1 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
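# Illustrative interactive use of the compression filesystems exercised above
# (file names are hypothetical):
#   fs = fsspec.filesystem("gzip", fo="dataset.jsonl.gz")
#   with fs.open("dataset.jsonl", "r", encoding="utf-8") as f:
#       first_line = f.readline()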
| 74 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        '''simple docstring'''
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        '''simple docstring'''
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        '''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        '''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        '''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        '''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        '''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        '''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        '''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        '''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])

    def test_wordpiece_tokenizer(self):
        '''simple docstring'''
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
'''simple docstring'''
self.assertTrue(_is_whitespace(""" """))
self.assertTrue(_is_whitespace("""\t"""))
self.assertTrue(_is_whitespace("""\r"""))
self.assertTrue(_is_whitespace("""\n"""))
self.assertTrue(_is_whitespace("""\u00A0"""))
self.assertFalse(_is_whitespace("""A"""))
self.assertFalse(_is_whitespace("""-"""))
    def test_is_control(self):
'''simple docstring'''
self.assertTrue(_is_control("""\u0005"""))
self.assertFalse(_is_control("""A"""))
self.assertFalse(_is_control(""" """))
self.assertFalse(_is_control("""\t"""))
self.assertFalse(_is_control("""\r"""))
    def test_is_punctuation(self):
'''simple docstring'''
self.assertTrue(_is_punctuation("""-"""))
self.assertTrue(_is_punctuation("""$"""))
self.assertTrue(_is_punctuation("""`"""))
self.assertTrue(_is_punctuation("""."""))
self.assertFalse(_is_punctuation("""A"""))
self.assertFalse(_is_punctuation(""" """))
    def test_clean_text(self):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
    def test_offsets_with_special_characters(self):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""]))
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""])
    def test_change_tokenize_chinese_chars(self):
        '''simple docstring'''
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f'##{token}' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
@slow
    def test_sequence_builders(self):
        '''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]
    def test_prepare_for_model(self):
        '''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True)

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
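# RoCBert feeds three parallel id channels (token, shape, pronunciation) into the
# model, which is why the tests above convert tokens three ways. Illustrative sketch,
# assuming a tokenizer built as in setUp():
#   tokens = tokenizer.tokenize("你好")
#   ids = tokenizer.convert_tokens_to_ids(tokens)
#   shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
#   pron_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)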
| 12 | 0 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    """simple docstring"""

    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }
    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)

        self.speaker_embeddings = speaker_embeddings
@classmethod
    def from_pretrained(cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"""`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exist
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`."""
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)
    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub=False,
        **kwargs,
    ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)
    def _load_voice_preset(self, voice_preset=None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"""`{os.path.join(self.speaker_embeddings.get("repo_or_path", "/"), voice_preset_paths[key])}` does not exist
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings."""
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict
    def _validate_voice_preset_dict(self, voice_preset=None):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")
    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)

            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"

                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
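# Illustrative end-to-end use of the processor (the checkpoint and voice preset
# names below are examples, not guarantees):
#   processor = BarkProcessor.from_pretrained("suno/bark-small")
#   inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
#   # inputs["history_prompt"] then carries the validated speaker embeddings.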
| 421 |
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1000000) -> int:
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
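# A minimal sanity check, assuming the definitions above (585 is a known
# double-base palindrome: 585 = 0b1001001001):
#   assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])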
| 421 | 1 |
import datasets
_CITATION = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
_DESCRIPTION = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
_KWARGS_DESCRIPTION = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
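# Illustrative direct use of the helper, assuming numpy inputs (the metric is
# declared with format="numpy", so _compute receives arrays):
#   import numpy as np
#   simple_accuracy(np.array([0, 1, 1]), np.array([0, 1, 0]))  # -> 0.666...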
| 2 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class MockDownloadManager:
    '''simple docstring'''

    dummy_file_name = 'dummy_data'
    datasets_scripts_dir = 'datasets'
    is_local = False

    def __init__(
        self,
        dataset_name,
        config,
        version,
        cache_dir=None,
        use_local_dummy_data=False,
        load_existing_dummy_data=True,
        download_callbacks=None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url)
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
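# Sketch of how dataset tests drive this manager (argument values are illustrative):
#   dl_manager = MockDownloadManager(dataset_name="squad", config=None, version="1.0.0")
#   paths = dl_manager.download_and_extract({"train": "https://example.com/train.json"})
#   # -> {"train": "<dummy_data_root>/train.json"} resolved inside dummy_data.zip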
| 38 | 0 |
'''simple docstring'''
def pancake_sort(arr):
    """simple docstring"""
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
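# Illustrative call, assuming the definition above:
#   pancake_sort([3, 6, 1, 10, 2])  # -> [1, 2, 3, 6, 10]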
| 460 |
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """simple docstring"""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 460 | 1 |
'''simple docstring'''
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    '''simple docstring'''
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
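# Example invocation (all paths below are illustrative):
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base/pytorch_model.bin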
| 48 |
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        """simple docstring"""
        self.node_position = []

    def get_position(self, vertex):
        """simple docstring"""
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        """simple docstring"""
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """simple docstring"""
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """simple docstring"""
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        """simple docstring"""
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        """simple docstring"""
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    '''simple docstring'''
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
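# Quick example without stdin, assuming the definitions above: a triangle graph
# 0-1 (w=1), 1-2 (w=2), 0-2 (w=3) yields the MST edges [(0, 1), (1, 2)]:
#   g = defaultdict(list)
#   for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
#       g[u].append([v, w])
#       g[v].append([u, w])
#   prisms_algorithm(g)  # -> [(0, 1), (1, 2)]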
| 48 | 1 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
_CITATION = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_KWARGS_DESCRIPTION = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string" ),
"prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 223 |
'''simple docstring'''
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    '''simple docstring'''
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    '''simple docstring'''
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    '''simple docstring'''
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print('\nSorted array in ascending order is: ', end='')
    print(*unsorted, sep=', ')

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print('Sorted array in descending order is: ', end='')
    print(*unsorted, sep=', ')
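# Note: the classic bitonic network assumes the input length is a power of two so
# the halving recursion stays balanced. Illustrative call:
#   data = [3, 1, 4, 2]
#   bitonic_sort(data, 0, len(data), 1)  # data is now [1, 2, 3, 4]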
| 223 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
        """simple docstring"""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids(self):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
@slow
    def test_sequence_builders(self):
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration(self):
        """simple docstring"""
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]
# fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
| 316 |
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    """simple docstring"""

    slow_tokenizer_class = CustomTokenizer
    pass
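# Typical companion registration (sketch; CustomConfig is assumed to exist elsewhere
# in this example repo):
#   AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer,
#                          fast_tokenizer_class=CustomTokenizerFast)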
| 316 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger('''transformers.models.speecht5''')
MAPPING_SPEECH_ENCODER_PRENET = {
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
MAPPING_TEXT_ENCODER_PRENET = {
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
MAPPING_SPEECH_DECODER_PRENET = {
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
MAPPING_TEXT_DECODER_PRENET = {
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
MAPPING_TEXT_DECODER_POSTNET = {
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
MAPPING_ENCODER = {
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
MAPPING_DECODER = {
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
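
# Copies one checkpoint tensor into the HF module attribute addressed by the
# dotted `key` path, after checking that the shapes agree.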
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
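
# Returns True if `name` matches any ignore pattern: a "prefix.*" wildcard, a
# "prefix.*.suffix" wildcard, or a plain substring.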
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
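
# Walks the fairseq state dict and copies every recognised tensor into the HF
# model, collecting anything unmatched in `unused_weights`.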
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
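
# Convolutional feature-encoder weights are addressed positionally
# ("conv_layers.<layer_id>.<type_id>"), so they are handled separately here.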
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
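
# Entry point: builds the task-specific HF model, loads the fairseq weights into
# it, and saves (optionally pushes) the converted model plus its processor.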
@torch.no_grad()
def convert_speechta_checkpoint(
    task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None,
):
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
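
# Example invocation (file names here are hypothetical):
#   python convert_speecht5_checkpoint.py --task t2s --checkpoint_path speecht5_tts.pt \
#       --vocab_path spm_char.model --pytorch_dump_folder_path ./speecht5_tts_hf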
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
args = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
) | 101 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=2 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=36 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=6 , _UpperCAmelCase=6 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , _UpperCAmelCase=1000 , ):
__a : Dict = parent
__a : Optional[int] = batch_size
__a : Optional[int] = num_channels
__a : List[Any] = image_size
__a : int = patch_size
__a : Tuple = text_seq_length
__a : Dict = is_training
__a : str = use_input_mask
__a : Optional[int] = use_token_type_ids
__a : List[Any] = use_labels
__a : Tuple = vocab_size
__a : str = hidden_size
__a : Any = num_hidden_layers
__a : List[str] = num_attention_heads
__a : str = intermediate_size
__a : int = hidden_act
__a : List[Any] = hidden_dropout_prob
__a : Tuple = attention_probs_dropout_prob
__a : Tuple = max_position_embeddings
__a : List[Any] = type_vocab_size
__a : int = type_sequence_label_size
__a : str = initializer_range
__a : Dict = coordinate_size
__a : int = shape_size
__a : int = num_labels
__a : Optional[int] = num_choices
__a : Any = scope
__a : Any = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__a : Optional[int] = text_seq_length
__a : str = (image_size // patch_size) ** 2 + 1
__a : Tuple = self.text_seq_length + self.image_seq_length
def _lowerCamelCase ( self ):
__a : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__a : Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__a : Optional[Any] = bbox[i, j, 3]
__a : Union[str, Any] = bbox[i, j, 1]
__a : Tuple = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__a : Optional[int] = bbox[i, j, 2]
__a : Optional[int] = bbox[i, j, 0]
__a : str = t
__a : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Union[str, Any] = None
if self.use_input_mask:
__a : int = random_attention_mask([self.batch_size, self.text_seq_length] )
__a : int = None
if self.use_token_type_ids:
__a : Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__a : Optional[int] = None
__a : Dict = None
if self.use_labels:
__a : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__a : int = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Union[str, Any] = LayoutLMvaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
# text + image
__a : Union[str, Any] = model(_UpperCAmelCase , pixel_values=_UpperCAmelCase )
__a : List[Any] = model(
_UpperCAmelCase , bbox=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__a : Optional[int] = model(_UpperCAmelCase , bbox=_UpperCAmelCase , pixel_values=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__a : List[str] = model(_UpperCAmelCase , bbox=_UpperCAmelCase , pixel_values=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__a : int = model(_UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__a : Tuple = model(pixel_values=_UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Optional[Any] = self.num_labels
__a : Optional[Any] = LayoutLMvaForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : Union[str, Any] = model(
_UpperCAmelCase , bbox=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : List[str] = self.num_labels
__a : int = LayoutLMvaForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : List[str] = model(
_UpperCAmelCase , bbox=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : List[Any] = LayoutLMvaForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : List[str] = model(
_UpperCAmelCase , bbox=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class __lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__lowerCAmelCase = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True
def _lowerCamelCase ( self ):
__a : str = LayoutLMvaModelTester(self )
__a : int = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
        return inputs_dict
def _lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ):
__a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__a : Optional[Any] = type
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
@slow
def _lowerCamelCase ( self ):
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Tuple = LayoutLMvaModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowerCamelCase ( self ):
return LayoutLMvaImageProcessor(apply_ocr=_UpperCAmelCase ) if is_vision_available() else None
@slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)) | 101 | 1 |
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class __lowercase ( __lowerCamelCase , unittest.TestCase ):
snake_case_ = BertJapaneseTokenizer
snake_case_ = False
snake_case_ = True
def __lowercase ( self : Any ):
'''simple docstring'''
super().setUp()
UpperCAmelCase__ : int = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""こんにちは""",
"""こん""",
"""にちは""",
"""ばんは""",
"""##こん""",
"""##にちは""",
"""##ばんは""",
"""世界""",
"""##世界""",
"""、""",
"""##、""",
"""。""",
"""##。""",
]
UpperCAmelCase__ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
pass # TODO add if relevant
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
pass # TODO add if relevant
def __lowercase ( self : str ):
'''simple docstring'''
pass # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.tokenizer_class(self.vocab_file ,word_tokenizer_type="""mecab""" )
self.assertIsNotNone(A )
UpperCAmelCase__ : int = """こんにちは、世界。\nこんばんは、世界。"""
UpperCAmelCase__ : Tuple = tokenizer.tokenize(A )
self.assertListEqual(A ,["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
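        # The tokenizer must survive a pickle round-trip unchanged (e.g. for use with multiprocessing).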
UpperCAmelCase__ : List[Any] = os.path.join(self.tmpdirname ,"""tokenizer.bin""" )
with open(A ,"""wb""" ) as handle:
pickle.dump(A ,A )
with open(A ,"""rb""" ) as handle:
UpperCAmelCase__ : Tuple = pickle.load(A )
UpperCAmelCase__ : Optional[Any] = tokenizer_new.tokenize(A )
self.assertListEqual(A ,A )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Any = MecabTokenizer(mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] ,)
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
try:
UpperCAmelCase__ : Any = MecabTokenizer(mecab_dic="""unidic_lite""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] ,)
def __lowercase ( self : str ):
'''simple docstring'''
try:
UpperCAmelCase__ : Optional[Any] = MecabTokenizer(mecab_dic="""unidic""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] ,)
def __lowercase ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = MecabTokenizer(do_lower_case=A ,mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] ,)
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
try:
UpperCAmelCase__ : Tuple = MecabTokenizer(
do_lower_case=A ,normalize_text=A ,mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] ,)
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = MecabTokenizer(normalize_text=A ,mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] ,)
@require_sudachi
def __lowercase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.tokenizer_class(self.vocab_file ,word_tokenizer_type="""sudachi""" )
self.assertIsNotNone(A )
UpperCAmelCase__ : Optional[Any] = """こんにちは、世界。\nこんばんは、世界。"""
UpperCAmelCase__ : Any = tokenizer.tokenize(A )
self.assertListEqual(A ,["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
UpperCAmelCase__ : List[Any] = os.path.join(self.tmpdirname ,"""tokenizer.bin""" )
with open(A ,"""wb""" ) as handle:
pickle.dump(A ,A )
with open(A ,"""rb""" ) as handle:
UpperCAmelCase__ : int = pickle.load(A )
UpperCAmelCase__ : Any = tokenizer_new.tokenize(A )
self.assertListEqual(A ,A )
@require_sudachi
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = SudachiTokenizer(sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,[""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] ,)
@require_sudachi
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : str = SudachiTokenizer(sudachi_dict_type="""core""" ,sudachi_split_mode="""A""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) ,["""外国""", """人""", """参政""", """権"""] )
@require_sudachi
def __lowercase ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Dict = SudachiTokenizer(sudachi_dict_type="""core""" ,sudachi_split_mode="""B""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) ,["""外国人""", """参政権"""] )
@require_sudachi
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Dict = SudachiTokenizer(sudachi_dict_type="""core""" ,sudachi_split_mode="""C""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) ,["""外国人参政権"""] )
@require_sudachi
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : int = SudachiTokenizer(do_lower_case=A ,sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,[""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] ,)
@require_sudachi
def __lowercase ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = SudachiTokenizer(normalize_text=A ,sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,[""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] ,)
@require_sudachi
def __lowercase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = SudachiTokenizer(trim_whitespace=A ,sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] ,)
@require_jumanpp
def __lowercase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.tokenizer_class(self.vocab_file ,word_tokenizer_type="""jumanpp""" )
self.assertIsNotNone(A )
UpperCAmelCase__ : Tuple = """こんにちは、世界。\nこんばんは、世界。"""
UpperCAmelCase__ : Union[str, Any] = tokenizer.tokenize(A )
self.assertListEqual(A ,["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
UpperCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname ,"""tokenizer.bin""" )
with open(A ,"""wb""" ) as handle:
pickle.dump(A ,A )
with open(A ,"""rb""" ) as handle:
UpperCAmelCase__ : Optional[int] = pickle.load(A )
UpperCAmelCase__ : Dict = tokenizer_new.tokenize(A )
self.assertListEqual(A ,A )
@require_jumanpp
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] ,)
@require_jumanpp
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = JumanppTokenizer(do_lower_case=A )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] ,)
@require_jumanpp
def __lowercase ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = JumanppTokenizer(normalize_text=A )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] ,)
@require_jumanpp
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : int = JumanppTokenizer(trim_whitespace=A )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] ,)
@require_jumanpp
def __lowercase ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""" ) ,["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] ,)
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : int = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""]
UpperCAmelCase__ : int = {}
for i, token in enumerate(A ):
UpperCAmelCase__ : List[str] = i
UpperCAmelCase__ : List[Any] = WordpieceTokenizer(vocab=A ,unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) ,[] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) ,["""こんにちは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは""" ) ,["""こん""", """##ばんは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""" ) ,["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""] )
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : str = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""" )
UpperCAmelCase__ : Any = tokenizer.subword_tokenizer
UpperCAmelCase__ : List[str] = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""" )
self.assertListEqual(A ,["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""] )
UpperCAmelCase__ : str = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""" )
self.assertListEqual(A ,["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""] )
def __lowercase ( self : List[str] ):
'''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_a = tokenizer.encode("どういたしまして。", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __lowercase ( __lowerCamelCase , unittest.TestCase ):
snake_case_ = BertJapaneseTokenizer
snake_case_ = False
def __lowercase ( self : str ):
'''simple docstring'''
super().setUp()
UpperCAmelCase__ : Optional[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
UpperCAmelCase__ : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def __lowercase ( self : List[Any] ,**A : Optional[Any] ):
'''simple docstring'''
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname ,subword_tokenizer_type="""character""" ,**A )
def __lowercase ( self : Any ,A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = """こんにちは、世界。 \nこんばんは、世界。"""
UpperCAmelCase__ : Optional[int] = """こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"""
return input_text, output_text
def __lowercase ( self : Any ):
'''simple docstring'''
pass # TODO add if relevant
def __lowercase ( self : str ):
'''simple docstring'''
pass # TODO add if relevant
def __lowercase ( self : List[Any] ):
'''simple docstring'''
pass # TODO add if relevant
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.tokenizer_class(self.vocab_file ,subword_tokenizer_type="""character""" )
UpperCAmelCase__ : str = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""" )
self.assertListEqual(
A ,["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A ) ,[3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def __lowercase ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
UpperCAmelCase__ : Optional[int] = {}
for i, token in enumerate(A ):
UpperCAmelCase__ : List[Any] = i
UpperCAmelCase__ : List[Any] = CharacterTokenizer(vocab=A ,unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) ,[] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) ,["""こ""", """ん""", """に""", """ち""", """は"""] )
self.assertListEqual(tokenizer.tokenize("""こんにちほ""" ) ,["""こ""", """ん""", """に""", """ち""", """[UNK]"""] )
def __lowercase ( self : List[str] ):
'''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_a = tokenizer.encode("どういたしまして。", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __lowercase ( unittest.TestCase ):
def __lowercase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = """cl-tohoku/bert-base-japanese"""
UpperCAmelCase__ : str = AutoTokenizer.from_pretrained(A )
self.assertIsInstance(A ,A )
class __lowercase ( unittest.TestCase ):
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = """cl-tohoku/bert-base-japanese"""
with self.assertLogs("""transformers""" ,level="""WARNING""" ) as cm:
BertTokenizer.from_pretrained(A )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
UpperCAmelCase__ : Dict = """bert-base-cased"""
with self.assertLogs("""transformers""" ,level="""WARNING""" ) as cm:
BertJapaneseTokenizer.from_pretrained(A )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
| 65 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
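
# Bundles a BridgeTower image processor and a Roberta tokenizer behind the
# single ProcessorMixin interface used by all multimodal processors.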
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding
    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 377 | 0 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class a_ :
'''simple docstring'''
@staticmethod
def a__ (*lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Tuple = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
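
    # threshold=0.0 keeps every candidate box, so the assertions below can
    # validate the output schema regardless of model quality.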
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
@require_tf
@unittest.skip('Object detection not implemented in TF' )
def a__ (self ):
'''simple docstring'''
pass
@require_torch
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = 'hf-internal-testing/tiny-detr-mobilenetsv3'
lowerCamelCase__ : str = AutoModelForObjectDetection.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Any = ObjectDetectionPipeline(model=lowerCamelCase_, feature_extractor=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg', threshold=0.0 )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=4 ), [
{'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 1_5_9, 'ymin': 1_2_0, 'xmax': 4_8_0, 'ymax': 3_5_9}},
{'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 1_5_9, 'ymin': 1_2_0, 'xmax': 4_8_0, 'ymax': 3_5_9}},
], )
lowerCamelCase__ : Dict = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
], threshold=0.0, )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=4 ), [
[
{'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 1_5_9, 'ymin': 1_2_0, 'xmax': 4_8_0, 'ymax': 3_5_9}},
{'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 1_5_9, 'ymin': 1_2_0, 'xmax': 4_8_0, 'ymax': 3_5_9}},
],
[
{'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 1_5_9, 'ymin': 1_2_0, 'xmax': 4_8_0, 'ymax': 3_5_9}},
{'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 1_5_9, 'ymin': 1_2_0, 'xmax': 4_8_0, 'ymax': 3_5_9}},
],
], )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = 'facebook/detr-resnet-50'
lowerCamelCase__ : Dict = AutoModelForObjectDetection.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : str = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : str = ObjectDetectionPipeline(model=lowerCamelCase_, feature_extractor=lowerCamelCase_ )
lowerCamelCase__ : Any = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=4 ), [
{'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_0, 'xmax': 1_7_5, 'ymax': 1_1_7}},
{'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 3_3_3, 'ymin': 7_2, 'xmax': 3_6_8, 'ymax': 1_8_7}},
{'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_3_9, 'ymax': 4_7_3}},
{'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 1_3, 'ymin': 5_2, 'xmax': 3_1_4, 'ymax': 4_7_0}},
{'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_4_5, 'ymin': 2_3, 'xmax': 6_4_0, 'ymax': 3_6_8}},
], )
lowerCamelCase__ : Tuple = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=4 ), [
[
{'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_0, 'xmax': 1_7_5, 'ymax': 1_1_7}},
{'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 3_3_3, 'ymin': 7_2, 'xmax': 3_6_8, 'ymax': 1_8_7}},
{'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_3_9, 'ymax': 4_7_3}},
{'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 1_3, 'ymin': 5_2, 'xmax': 3_1_4, 'ymax': 4_7_0}},
{'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_4_5, 'ymin': 2_3, 'xmax': 6_4_0, 'ymax': 3_6_8}},
],
[
{'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_0, 'xmax': 1_7_5, 'ymax': 1_1_7}},
{'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 3_3_3, 'ymin': 7_2, 'xmax': 3_6_8, 'ymax': 1_8_7}},
{'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_3_9, 'ymax': 4_7_3}},
{'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 1_3, 'ymin': 5_2, 'xmax': 3_1_4, 'ymax': 4_7_0}},
{'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_4_5, 'ymin': 2_3, 'xmax': 6_4_0, 'ymax': 3_6_8}},
],
], )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = 'facebook/detr-resnet-50'
lowerCamelCase__ : List[str] = pipeline('object-detection', model=lowerCamelCase_ )
lowerCamelCase__ : Tuple = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=4 ), [
{'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_0, 'xmax': 1_7_5, 'ymax': 1_1_7}},
{'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 3_3_3, 'ymin': 7_2, 'xmax': 3_6_8, 'ymax': 1_8_7}},
{'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_3_9, 'ymax': 4_7_3}},
{'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 1_3, 'ymin': 5_2, 'xmax': 3_1_4, 'ymax': 4_7_0}},
{'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_4_5, 'ymin': 2_3, 'xmax': 6_4_0, 'ymax': 3_6_8}},
], )
lowerCamelCase__ : Any = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=4 ), [
[
{'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_0, 'xmax': 1_7_5, 'ymax': 1_1_7}},
{'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 3_3_3, 'ymin': 7_2, 'xmax': 3_6_8, 'ymax': 1_8_7}},
{'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_3_9, 'ymax': 4_7_3}},
{'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 1_3, 'ymin': 5_2, 'xmax': 3_1_4, 'ymax': 4_7_0}},
{'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_4_5, 'ymin': 2_3, 'xmax': 6_4_0, 'ymax': 3_6_8}},
],
[
{'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_0, 'xmax': 1_7_5, 'ymax': 1_1_7}},
{'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 3_3_3, 'ymin': 7_2, 'xmax': 3_6_8, 'ymax': 1_8_7}},
{'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_3_9, 'ymax': 4_7_3}},
{'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 1_3, 'ymin': 5_2, 'xmax': 3_1_4, 'ymax': 4_7_0}},
{'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_4_5, 'ymin': 2_3, 'xmax': 6_4_0, 'ymax': 3_6_8}},
],
], )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = 0.9_985
lowerCamelCase__ : Dict = 'facebook/detr-resnet-50'
lowerCamelCase__ : Optional[int] = pipeline('object-detection', model=lowerCamelCase_ )
lowerCamelCase__ : str = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg', threshold=lowerCamelCase_ )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=4 ), [
{'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 1_3, 'ymin': 5_2, 'xmax': 3_1_4, 'ymax': 4_7_0}},
{'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_4_5, 'ymin': 2_3, 'xmax': 6_4_0, 'ymax': 3_6_8}},
], )
@require_torch
@require_pytesseract
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = 'Narsil/layoutlmv3-finetuned-funsd'
lowerCamelCase__ : Tuple = 0.9_993
lowerCamelCase__ : str = pipeline('object-detection', model=lowerCamelCase_, threshold=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = object_detector(
'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=4 ), [
{'score': 0.9_993, 'label': 'I-ANSWER', 'box': {'xmin': 2_9_4, 'ymin': 2_5_4, 'xmax': 3_4_3, 'ymax': 2_6_4}},
{'score': 0.9_993, 'label': 'I-ANSWER', 'box': {'xmin': 2_9_4, 'ymin': 2_5_4, 'xmax': 3_4_3, 'ymax': 2_6_4}},
], )
| 696 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
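
# Bare stubs whose `forward` signatures are inspected by the `ensure_valid_input`
# argument-ordering tests.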
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class OnnxExportTestCase ( unittest.TestCase ):
    '''simple docstring'''
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ('bert-base-cased', {}),
        ('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
    ]
@require_tf
@slow
    def test_export_tensorflow(self ):
        '''simple docstring'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, 'tf', 1_2, **model_kwargs )
@require_torch
@slow
    def test_export_pytorch(self ):
        '''simple docstring'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, 'pt', 1_2, **model_kwargs )
@require_torch
@slow
    def test_export_custom_bert_model(self ):
        '''simple docstring'''
        from transformers import BertModel
        vocab = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
        with NamedTemporaryFile(mode='w+t' ) as vocab_file:
            vocab_file.write('\n'.join(vocab ) )
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name )
        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab ) ) )
            model.save_pretrained(bert_save_dir )
            self._test_export(bert_save_dir, 'pt', 1_2, tokenizer )
@require_tf
@slow
    def test_quantize_tf(self ):
        '''simple docstring'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, 'tf', 1_2, **model_kwargs )
            quantized_path = quantize(Path(path ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
                self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
    def test_quantize_pytorch(self ):
        '''simple docstring'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, 'pt', 1_2, **model_kwargs )
            quantized_path = quantize(path )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
                self.fail('Quantized model is bigger than initial ONNX model' )
    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs ):
        '''simple docstring'''
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir ).joinpath('model.onnx' )
            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()
            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs )
            return path
        except Exception as e:
            self.fail(e )
@require_torch
@require_tokenizers
@slow
    def test_infer_dynamic_axis_pytorch(self ):
        '''simple docstring'''
        from transformers import BertModel
        model = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
        tokenizer = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
        self._test_infer_dynamic_axis(model, tokenizer, 'pt' )
@require_tf
@require_tokenizers
@slow
    def test_infer_dynamic_axis_tf(self ):
        '''simple docstring'''
        from transformers import TFBertModel
        model = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
        tokenizer = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
        self._test_infer_dynamic_axis(model, tokenizer, 'tf' )
    def _test_infer_dynamic_axis(self, model, tokenizer, framework ):
        '''simple docstring'''
        nlp = FeatureExtractionPipeline(model, tokenizer )
        variable_names = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
        input_vars , output_vars , shapes , tokens = infer_shapes(nlp, framework )
        # Assert all variables are present
        self.assertEqual(len(shapes ), len(variable_names ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3], input_vars )
        self.assertSequenceEqual(variable_names[3:], output_vars )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name], {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'], {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'], {0: 'batch'} )
    def test_ensure_valid_input(self ):
        '''simple docstring'''
        input_names = ['input_ids', 'attention_mask', 'token_type_ids']
        tokens = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
        ordered_input_names , inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args ), 3 )
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names ), set(input_names ) )
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names , inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names )
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args ), 1 )
        self.assertEqual(len(ordered_input_names ), 1 )
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens['input_ids'] )
        self.assertEqual(ordered_input_names[0], 'input_ids' )
    def test_generate_identified_filename(self ):
        '''simple docstring'''
        generated = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ), '-test' )
        self.assertEqual('/home/something/my_fake_model-test.onnx', generated.as_posix() )
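# --- Illustrative sketch (not part of the test class above): the export-then-quantize flow
# the quantize tests check, written as a standalone script. The output path and opset value
# are assumptions; `convert` and `quantize` are the legacy helpers imported at the top.
from pathlib import Path
from transformers.convert_graph_to_onnx import convert, quantize

onnx_path = Path('/tmp/onnx_export/model.onnx')
# positional args: framework, model, output path, opset, tokenizer
convert('pt', 'bert-base-cased', onnx_path, 12, None)
quantized_path = quantize(onnx_path)
print('original bytes:', onnx_path.stat().st_size, '| quantized bytes:', quantized_path.stat().st_size)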
| 696 | 1 |
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments :
    '''simple docstring'''
    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    freeze_feature_extractor: Optional[bool] = field(
        default=True , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
    verbose_logging: Optional[bool] = field(
        default=False , metadata={'''help''': '''Whether to log verbose messages or not.'''} , )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0 , metadata={'''help''': '''Maximum temperature for gumbel softmax.'''} )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5 , metadata={'''help''': '''Minimum temperature for gumbel softmax.'''} )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.9_9_9_9_9_5 , metadata={'''help''': '''Decay of gumbel temperature during training.'''} )
def configure_logger( model_args , training_args ):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank ):
        logging_level = logging.INFO
    logger.setLevel(logging_level )
@dataclass
class DataTrainingArguments :
    '''simple docstring'''
    dataset_name: str = field(
        default=None , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    train_split_name: Optional[str] = field(
        default='''train''' , metadata={
            '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
        } , )
    validation_split_name: Optional[str] = field(
        default='''validation''' , metadata={
            '''help''': (
                '''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''''
            )
        } , )
    speech_file_column: Optional[str] = field(
        default='''file''' , metadata={'''help''': '''Column in the dataset that contains speech file path. Defaults to \'file\''''} , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
    validation_split_percentage: Optional[int] = field(
        default=1 , metadata={
            '''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
        } , )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
    max_duration_in_seconds: Optional[float] = field(
        default=2_0.0 , metadata={'''help''': '''Filter audio files that are longer than `max_duration_in_seconds` seconds'''} )
@dataclass
class DataCollatorForWavaVecaPretraining :
    '''simple docstring'''
    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None
    def __call__( self , features: List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]:
        '''simple docstring'''
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1] )
        batch_size = batch["input_values"].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1 ) ).to(
                torch.long )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["input_values"].device )
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[(torch.arange(attention_mask.shape[0] , device=batch["input_values"].device ), output_lengths - 1)] = 1
            attention_mask = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=attention_mask , min_masks=2 , )
        return batch
class WavaVecaPreTrainer ( Trainer ):
    '''simple docstring'''
    def __init__( self , *args , max_gumbel_temp=1 , min_gumbel_temp=0 , gumbel_temp_decay=1.0 , **kwargs ) -> Tuple:
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay
    def training_step( self , model: nn.Module , inputs: Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor:
        '''simple docstring'''
        model.train()
        inputs = self._prepare_inputs(inputs )
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model , inputs )
        else:
            loss = self.compute_loss(model , inputs )
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss ).backward()
        elif self.use_apex:
            with amp.scale_loss(loss , self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss )
        else:
            loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args , training_args )
    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=f"""{data_args.train_split_name}[:{data_args.validation_split_percentage}%]""" , cache_dir=model_args.cache_dir , )
        datasets["train"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=f"""{data_args.train_split_name}[{data_args.validation_split_percentage}%:]""" , cache_dir=model_args.cache_dir , )
    else:
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , )
        datasets["train"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=f"""{data_args.train_split_name}""" , cache_dir=model_args.cache_dir , )
    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=True )
    def prepare_dataset(batch ):
        # check that all files have the correct sampling rate
        batch["speech"] , batch["sampling_rate"] = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
        return batch
    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names )
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data : len(data["speech"] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
    def normalize(batch ):
        return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate )
    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'" )
    model = WavaVecaForPreTraining(config )
    data_collator = DataCollatorForWavaVecaPretraining(model=model , feature_extractor=feature_extractor )
    trainer = WavaVecaPreTrainer(
        model=model , data_collator=data_collator , args=training_args , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=feature_extractor , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
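# --- Illustrative sketch (not part of the script above): the gumbel softmax temperature
# schedule WavaVecaPreTrainer applies after every update, shown in isolation. The constants
# mirror the argument defaults above but are assumptions in this standalone form.
max_gumbel_temp, min_gumbel_temp, gumbel_temp_decay = 2.0, 0.5, 0.9_9_9_9_9_5

def gumbel_temperature(num_update_step: int) -> float:
    # exponential decay from max_gumbel_temp, clipped from below at min_gumbel_temp
    return max(max_gumbel_temp * gumbel_temp_decay**num_update_step, min_gumbel_temp)

for step in (0, 10_000, 100_000, 1_000_000):
    print(step, round(gumbel_temperature(step), 4))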
| 82 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Any = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
__lowercase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 564 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class Node :
    """simple docstring"""
    def __init__( self , value ):
        """simple docstring"""
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None
class BinaryTreeNodeSum :
    """simple docstring"""
    def __init__( self , tree ):
        """simple docstring"""
        self.tree = tree
    def depth_first_search( self , node ):
        """simple docstring"""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left ) + self.depth_first_search(node.right )
        )
def __iter__( self ):
"""simple docstring"""
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
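# --- Illustrative usage sketch (not part of the original module): summing a small
# hand-built tree with the classes above. Iterating yields a single value, the DFS sum.
example_tree = Node(10)
example_tree.left = Node(5)
example_tree.right = Node(-3)
assert list(BinaryTreeNodeSum(example_tree)) == [12]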
| 712 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , 'w' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_input_output_texts( self , tokenizer ):
        """simple docstring"""
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer( self ):
        """simple docstring"""
        tokenizer = XLMTokenizer(self.vocab_file , self.merges_file )
        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    @slow
    def test_sequence_builders( self ):
        """simple docstring"""
        tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048' )
        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_a = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
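# --- Illustrative walk-through (not part of the test above): how the toy merge rules in
# setUp drive test_full_tokenizer. "lower" is first split into characters with an
# end-of-word marker, then merges apply greedily in rank order:
#   l o w e r</w>   --('l o')-->     lo w e r</w>
#   lo w e r</w>    --('lo w')-->    low e r</w>
#   low e r</w>     --('e r</w>')--> low er</w>
# which is why the expected tokens are ['low', 'er</w>'] with vocab ids [14, 15].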
| 154 | 0 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule ( BaseTransformer ):
    mode = '''summarization'''
    loss_names = ['''loss''']
    metric_names = ROUGE_KEYS
    default_val_metric = '''rouge2'''
def __init__( self , a__ , **a__ ):
if hparams.sortish_sampler and hparams.gpus > 1:
_lowerCAmelCase : Tuple = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" )
if hparams.sortish_sampler:
raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" )
super().__init__(UpperCAmelCase_ , num_labels=UpperCAmelCase_ , mode=self.mode , **UpperCAmelCase_ )
use_task_specific_params(self.model , """summarization""" )
save_git_info(self.hparams.output_dir )
_lowerCAmelCase : int = Path(self.output_dir ) / "metrics.json"
_lowerCAmelCase : Union[str, Any] = Path(self.output_dir ) / "hparams.pkl"
pickle_save(self.hparams , self.hparams_save_path )
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : Tuple = defaultdict(UpperCAmelCase_ )
_lowerCAmelCase : Tuple = self.config.model_type
_lowerCAmelCase : Any = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size
_lowerCAmelCase : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
_lowerCAmelCase : Tuple = {
"train": self.hparams.n_train,
"val": self.hparams.n_val,
"test": self.hparams.n_test,
}
_lowerCAmelCase : Union[str, Any] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
_lowerCAmelCase : List[Any] = {
"train": self.hparams.max_target_length,
"val": self.hparams.val_max_target_length,
"test": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F"target_lens: {self.target_lens}"
assert self.target_lens["train"] <= self.target_lens["test"], F"target_lens: {self.target_lens}"
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
_lowerCAmelCase : int = get_git_info()["repo_sha"]
_lowerCAmelCase : Any = hparams.num_workers
_lowerCAmelCase : Dict = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , UpperCAmelCase_ ):
_lowerCAmelCase : Tuple = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
_lowerCAmelCase : str = self.decoder_start_token_id
_lowerCAmelCase : Optional[int] = (
SeqaSeqDataset if hasattr(self.tokenizer , """prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset
)
_lowerCAmelCase : Any = False
_lowerCAmelCase : Optional[int] = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
_lowerCAmelCase : Dict = self.hparams.eval_max_gen_length
else:
_lowerCAmelCase : List[str] = self.model.config.max_length
_lowerCAmelCase : str = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch( self , batch ):
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist() ) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch , Path(self.output_dir ) / """text_batch.json""" )
        save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" )
        self.already_saved_batch = True
        return readable_batch
    def forward( self , input_ids , **kwargs ):
        return self.model(input_ids , **kwargs )
    def ids_to_clean_text( self , generated_ids ):
        gen_text = self.tokenizer.batch_decode(
            generated_ids , skip_special_tokens=True , clean_up_tokenization_spaces=True )
        return lmap(str.strip , gen_text )
    def _step( self , batch ):
        pad_token_id = self.tokenizer.pad_token_id
        src_ids , src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model , TaForConditionalGeneration ):
            decoder_input_ids = self.model._shift_right(tgt_ids )
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids , pad_token_id )
        if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch )
        outputs = self(src_ids , attention_mask=src_mask , decoder_input_ids=decoder_input_ids , use_cache=False )
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id )
            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
        else:
            lprobs = nn.functional.log_softmax(lm_logits , dim=-1 )
            loss , nll_loss = label_smoothed_nll_loss(
                lprobs , tgt_ids , self.hparams.label_smoothing , ignore_index=pad_token_id )
        return (loss,)
@property
def __A ( self ):
return self.tokenizer.pad_token_id
def __A ( self , a__ , a__ ):
_lowerCAmelCase : Dict = self._step(UpperCAmelCase_ )
_lowerCAmelCase : Optional[Any] = dict(zip(self.loss_names , UpperCAmelCase_ ) )
# tokens per batch
_lowerCAmelCase : Dict = batch["input_ids"].ne(self.pad ).sum() + batch["labels"].ne(self.pad ).sum()
_lowerCAmelCase : Tuple = batch["input_ids"].shape[0]
_lowerCAmelCase : Any = batch["input_ids"].eq(self.pad ).sum()
_lowerCAmelCase : Optional[Any] = batch["input_ids"].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def __A ( self , a__ , a__ ):
return self._generative_step(UpperCAmelCase_ )
def __A ( self , a__ , a__="val" ):
self.step_count += 1
_lowerCAmelCase : int = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
_lowerCAmelCase : Union[str, Any] = losses["loss"]
_lowerCAmelCase : Tuple = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["gen_time", "gen_len"]
}
_lowerCAmelCase : str = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
_lowerCAmelCase : torch.FloatTensor = torch.tensor(UpperCAmelCase_ ).type_as(UpperCAmelCase_ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(UpperCAmelCase_ )
_lowerCAmelCase : Union[str, Any] = {F"{prefix}_avg_{k}": x for k, x in losses.items()}
_lowerCAmelCase : Tuple = self.step_count
self.metrics[prefix].append(UpperCAmelCase_ ) # callback writes this to self.metrics_save_path
_lowerCAmelCase : Any = flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F"{prefix}_loss": loss,
F"{prefix}_{self.val_metric}": metric_tensor,
}
    def calc_generative_metrics( self , preds , target ):
        return calculate_rouge(preds , target )
    def _generative_step( self , batch ):
        ta = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=True , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
        gen_time = (time.time() - ta) / batch["input_ids"].shape[0]
        preds = self.ids_to_clean_text(generated_ids )
        target = self.ids_to_clean_text(batch["""labels"""] )
        loss_tensors = self._step(batch )
        base_metrics = dict(zip(self.loss_names , loss_tensors ) )
        rouge = self.calc_generative_metrics(preds , target )
        summ_len = np.mean(lmap(len , preds ) )
        base_metrics.update(gen_time=gen_time , gen_len=summ_len , preds=preds , target=target , **rouge )
        return base_metrics
def __A ( self , a__ , a__ ):
return self._generative_step(UpperCAmelCase_ )
def __A ( self , a__ ):
return self.validation_epoch_end(UpperCAmelCase_ , prefix="""test""" )
def __A ( self , a__ ):
_lowerCAmelCase : Optional[Any] = self.n_obs[type_path]
_lowerCAmelCase : int = self.target_lens[type_path]
_lowerCAmelCase : Any = self.dataset_class(
self.tokenizer , type_path=UpperCAmelCase_ , n_obs=UpperCAmelCase_ , max_target_length=UpperCAmelCase_ , **self.dataset_kwargs , )
return dataset
def __A ( self , a__ , a__ , a__ = False ):
_lowerCAmelCase : int = self.get_dataset(UpperCAmelCase_ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
_lowerCAmelCase : int = dataset.make_sortish_sampler(UpperCAmelCase_ , distributed=self.hparams.gpus > 1 )
return DataLoader(
UpperCAmelCase_ , batch_size=UpperCAmelCase_ , collate_fn=dataset.collate_fn , shuffle=UpperCAmelCase_ , num_workers=self.num_workers , sampler=UpperCAmelCase_ , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
_lowerCAmelCase : Union[str, Any] = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
UpperCAmelCase_ , batch_sampler=UpperCAmelCase_ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
UpperCAmelCase_ , batch_size=UpperCAmelCase_ , collate_fn=dataset.collate_fn , shuffle=UpperCAmelCase_ , num_workers=self.num_workers , sampler=UpperCAmelCase_ , )
def __A ( self ):
_lowerCAmelCase : str = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=UpperCAmelCase_ )
return dataloader
def __A ( self ):
return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size )
def __A ( self ):
return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def __A ( a__ , a__ ):
BaseTransformer.add_model_specific_args(UpperCAmelCase_ , UpperCAmelCase_ )
add_generic_args(UpperCAmelCase_ , UpperCAmelCase_ )
parser.add_argument(
"""--max_source_length""" , default=1024 , type=UpperCAmelCase_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--max_target_length""" , default=56 , type=UpperCAmelCase_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--val_max_target_length""" , default=142 , type=UpperCAmelCase_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--test_max_target_length""" , default=142 , type=UpperCAmelCase_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument("""--freeze_encoder""" , action="""store_true""" )
parser.add_argument("""--freeze_embeds""" , action="""store_true""" )
parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=UpperCAmelCase_ )
parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=UpperCAmelCase_ )
parser.add_argument("""--max_tokens_per_batch""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ )
parser.add_argument("""--logger_name""" , type=UpperCAmelCase_ , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" )
parser.add_argument("""--n_train""" , type=UpperCAmelCase_ , default=-1 , required=UpperCAmelCase_ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_val""" , type=UpperCAmelCase_ , default=500 , required=UpperCAmelCase_ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_test""" , type=UpperCAmelCase_ , default=-1 , required=UpperCAmelCase_ , help="""# examples. -1 means use all.""" )
parser.add_argument(
"""--task""" , type=UpperCAmelCase_ , default="""summarization""" , required=UpperCAmelCase_ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--label_smoothing""" , type=UpperCAmelCase_ , default=0.0 , required=UpperCAmelCase_ )
parser.add_argument("""--src_lang""" , type=UpperCAmelCase_ , default="""""" , required=UpperCAmelCase_ )
parser.add_argument("""--tgt_lang""" , type=UpperCAmelCase_ , default="""""" , required=UpperCAmelCase_ )
parser.add_argument("""--eval_beams""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , required=UpperCAmelCase_ )
parser.add_argument(
"""--val_metric""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , required=UpperCAmelCase_ , choices=["""bleu""", """rouge2""", """loss""", None] )
parser.add_argument("""--eval_max_gen_length""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help="""never generate more than n tokens""" )
parser.add_argument("""--save_top_k""" , type=UpperCAmelCase_ , default=1 , required=UpperCAmelCase_ , help="""How many checkpoints to save""" )
parser.add_argument(
"""--early_stopping_patience""" , type=UpperCAmelCase_ , default=-1 , required=UpperCAmelCase_ , help=(
"""-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"""
""" val_check_interval will effect it."""
) , )
return parser
class TranslationModule ( SummarizationModule ):
    mode = '''translation'''
    loss_names = ['''loss''']
    metric_names = ['''bleu''']
    default_val_metric = '''bleu'''
    def __init__( self , hparams , **kwargs ):
        super().__init__(hparams , **kwargs )
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang
    def calc_generative_metrics( self , preds , target ):
        return calculate_bleu(preds , target )
def main( args ,model=None ) -> SummarizationModule:
    Path(args.output_dir ).mkdir(exist_ok=True )
    check_output_dir(args ,expected_items=3 )
    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args )
        else:
            model: SummarizationModule = TranslationModule(args )
    dataset = Path(args.data_dir ).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir ).startswith("""/tmp""" )
        or str(args.output_dir ).startswith("""/var""" )
    ):
        logger = True # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger
        project = os.environ.get("""WANDB_PROJECT""" ,dataset )
        logger = WandbLogger(name=model.output_dir.name ,project=project )
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger
        logger = WandbLogger(name=model.output_dir.name ,project=f"hf_{dataset}" )
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric ,args.early_stopping_patience )
    else:
        es_callback = False
    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model ,args ,logging_callback=SeqaSeqLoggingCallback() ,checkpoint_callback=get_checkpoint_callback(
            args.output_dir ,model.val_metric ,args.save_top_k ,lower_is_better ) ,early_stopping_callback=es_callback ,logger=logger ,)
    pickle_save(model.hparams ,model.output_dir / """hparams.pkl""" )
    if not args.do_predict:
        return model
    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir ,"""*.ckpt""" ) ,recursive=True ) )
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams )
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    main(args)
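# --- Illustrative sketch (not part of the script above): a compact reimplementation of the
# label-smoothed NLL loss that _step uses when hparams.label_smoothing > 0. It follows the
# standard fairseq-style definition; treat it as an assumption about utils.label_smoothed_nll_loss
# rather than the exact upstream code. Note ignore_index must be a valid vocab index here
# (the pad token id), since it is gathered before masking.
import torch

def smoothed_nll(lprobs: torch.Tensor, target: torch.Tensor, epsilon: float, ignore_index: int):
    # lprobs: (N, vocab) log-probabilities, target: (N,) gold token ids
    target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)    # per-token NLL
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)    # uniform-smoothing term
    pad_mask = target.eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0).sum()
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0).sum()
    eps_i = epsilon / lprobs.size(-1)
    return (1.0 - epsilon) * nll_loss + eps_i * smooth_loss, nll_loss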
| 213 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        super().setUp()
SCREAMING_SNAKE_CASE : List[Any] = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
SCREAMING_SNAKE_CASE : int = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(UpperCAmelCase_ ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_special_tokens_small_tok( self ):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
        assert tok("sam" ).input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text] , padding=False , truncation=True )["input_ids"]
        decoded = tok.batch_decode(encoded , skip_special_tokens=True , clean_up_tokenization_spaces=True )[0]
        assert src_text != decoded # I wish it did!
        assert decoded == "i am a small frog ."
    def test_empty_word_small_tok( self ):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text )["input_ids"]
        encoded_dot = tok(src_text_dot )["input_ids"]
        assert encoded[-1] == encoded_dot[0]
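# --- Illustrative sketch (not part of the test above): the "@@" continuation convention in
# BlenderbotSmall's BPE vocab. Joining tokens on spaces and deleting "@@ " undoes the
# subword split (a simplification of the tokenizer's real detokenization logic).
tokens = ['adapt', 'act', 'ap@@', 'te']
text = ' '.join(tokens).replace('@@ ', '')
assert text == 'adapt act apte'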
| 62 | 0 |
def is_palindrome( head ):
    if not head:
        return True
    # split the list to two parts
    fast , slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack( head ):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast , slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict( head ):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos )
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v ) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0 , len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
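# --- Illustrative usage sketch: the functions above only assume nodes with .val and .next,
# so a minimal node class (an assumption, not part of the original file) is enough to try them.
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None

def build_linked_list(values):
    head = tail = None
    for v in values:
        node = ListNode(v)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head

assert is_palindrome(build_linked_list([1, 2, 2, 1]))
assert not is_palindrome_stack(build_linked_list([1, 2, 3]))
assert is_palindrome_dict(build_linked_list([1, 2, 1]))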
| 356 |
from __future__ import annotations
def is_palindrome( n ):
    n = str(n )
    return n == n[::-1]
def solution( limit = 1_0_0_0_0_0_0 ):
    total = 0
    for i in range(1 , limit ):
        if is_palindrome(i ) and is_palindrome(bin(i ).split("b" )[1] ):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
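# --- Illustrative check (not part of the original solution): 5_8_5 is the classic
# double-base palindrome counted here, since 585 reads 1001001001 in binary.
assert is_palindrome(5_8_5) and is_palindrome(bin(5_8_5).split("b")[1])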
| 356 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class MultiNodeTest ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        """simple docstring"""
if self.framework == "pytorch":
subprocess.run(
                F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='utf-8' , check=True , )
assert hasattr(self , 'env' )
    def create_estimator( self , instance_count ):
        """simple docstring"""
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            'enabled': True,
            'processes_per_host': 8,
        }
        smp_options = {
            'enabled': True,
            'parameters': {
                'microbatches': 4,
                'placement_strategy': 'spread',
                'pipeline': 'interleaved',
                'optimize': 'speed',
                'partitions': 4,
                'ddp': True,
            },
        }
        distribution = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
        name_extension = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={
                **self.env.hyperparameters,
                'model_name_or_path': self.model_name_or_path,
                'max_steps': 5_00,
            } , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version='py36' , )
    def save_results_as_csv( self , job_name ):
        """simple docstring"""
        TrainingJobAnalytics(job_name ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
    def test_script( self , instance_count ):
        """simple docstring"""
        # create estimator and run training
        estimator = self.create_estimator(instance_count )
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 99_99_99 )
        )
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
        with open(F'''{estimator.latest_training_job.name}.json''' , 'w' ) as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , outfile )
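# --- Illustrative sketch (not part of the test above): the shape of the TrainingJobAnalytics
# dataframe and how the kpis are pulled out of it. The values below are made up.
import pandas as pd

df = pd.DataFrame({'metric_name': ['eval_accuracy', 'eval_loss'], 'value': [0.41, 1.05]})
eval_accuracy = list(df[df.metric_name == 'eval_accuracy']['value'])
eval_loss = list(df[df.metric_name == 'eval_loss']['value'])
assert all(a >= 0.3 for a in eval_accuracy) and all(l <= 1.2 for l in eval_loss)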
| 9 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a_ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __snake_case ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ["""pixel_values"""]
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BICUBIC , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , do_convert_rgb = True , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {'''shortest_edge''': 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name='''crop_size''' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self , image , size , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(image , size=size['''shortest_edge'''] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
        return center_crop(image , size=(size['''height'''], size['''width''']) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ):
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ):
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        do_convert_rgb=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        # Resolve every preprocessing flag: an explicit argument wins over the instance default.
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
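    # Illustrative usage (a minimal sketch, not part of the original file; the checkpoint
    # name and processor class are assumptions):
    #
    #     from PIL import Image
    #     processor = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
    #     batch = processor.preprocess(Image.open("cat.png"), return_tensors="np")
    #     print(batch["pixel_values"].shape)  # e.g. (1, 3, 224, 224)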
| 177 | 0 |
'''simple docstring'''
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
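# Why a custom converter: argparse's `type=bool` would treat any non-empty string
# (including "False") as truthy, so boolean fields are parsed with string_to_bool instead.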
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Map a string back to its original typed choice (e.g. an Enum member)."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important: don't use a dict as a default param in the signature, because it is mutable and shared across calls.
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]
def __init__( self : Tuple , UpperCAmelCase__ : Union[DataClassType, Iterable[DataClassType]] , **UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
# To make the default appear when using --help
if "formatter_class" not in kwargs:
lowercase : Dict =ArgumentDefaultsHelpFormatter
super().__init__(**UpperCAmelCase__ )
if dataclasses.is_dataclass(UpperCAmelCase__ ):
lowercase : List[str] =[dataclass_types]
lowercase : Any =list(UpperCAmelCase__ )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(UpperCAmelCase__ )
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , UpperCAmelCase__ ):
raise RuntimeError(
'''Unresolved type detected, which should have been done with the help of '''
'''`typing.get_type_hints` method by default''' )
        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]
lowercase : Union[str, Any] =getattr(field.type , '''__origin__''' , field.type )
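        # Unwrap `Optional[X]` / `Union[X, None]` to the concrete type X below: argparse
        # can attach only one `type=` callable per flag, so arbitrary multi-type Unions
        # (other than with `str` or `None`) are rejected with the error message below.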
if origin_type is Union or (hasattr(UpperCAmelCase__ , '''UnionType''' ) and isinstance(UpperCAmelCase__ , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(UpperCAmelCase__ ) not in field.type.__args__
):
raise ValueError(
'''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
''' the argument parser only supports one type per argument.'''
F''' Problem encountered in field \'{field.name}\'.''' )
if type(UpperCAmelCase__ ) not in field.type.__args__:
# filter `str` in Union
lowercase : Optional[int] =field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
lowercase : Union[str, Any] =getattr(field.type , '''__origin__''' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
lowercase : Dict =(
field.type.__args__[0] if isinstance(UpperCAmelCase__ , field.type.__args__[1] ) else field.type.__args__[1]
)
lowercase : Optional[int] =getattr(field.type , '''__origin__''' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
if origin_type is Literal or (isinstance(field.type , UpperCAmelCase__ ) and issubclass(field.type , UpperCAmelCase__ )):
if origin_type is Literal:
lowercase : Tuple =field.type.__args__
else:
lowercase : Optional[Any] =[x.value for x in field.type]
lowercase : str =make_choice_type_function(kwargs['''choices'''] )
if field.default is not dataclasses.MISSING:
lowercase : List[Any] =field.default
else:
lowercase : List[str] =True
elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)
# Hack because type=bool in argparse does not behave as we want.
lowercase : str =string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
lowercase : List[str] =False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
lowercase : int =default
# This tells argparse we accept 0 or 1 value after --field_name
lowercase : List[str] ='''?'''
# This is the value that will get picked if we do --field_name (without value)
lowercase : str =True
elif isclass(UpperCAmelCase__ ) and issubclass(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase : int =field.type.__args__[0]
lowercase : Optional[int] ='''+'''
if field.default_factory is not dataclasses.MISSING:
lowercase : List[str] =field.default_factory()
elif field.default is dataclasses.MISSING:
lowercase : Union[str, Any] =True
else:
lowercase : List[Any] =field.type
if field.default is not dataclasses.MISSING:
lowercase : Any =field.default
elif field.default_factory is not dataclasses.MISSING:
lowercase : Dict =field.default_factory()
else:
lowercase : List[str] =True
        parser.add_argument(field_name, *aliases, **kwargs)
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
lowercase : List[Any] =False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self
        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing the line `from __future__ import annotations`, which opts into Postponed "
                "Evaluation of Annotations (PEP 563)."
            )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "the line `from __future__ import annotations`, which opts into union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions lower than 3.10, use `typing.Union[X, Y]` instead of "
                    "`X | Y` and `typing.Optional[X]` instead of `X | None`."
                ) from ex
raise
        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ):
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
lowercase : Optional[Any] =[]
if args_filename:
args_files.append(Path(UpperCAmelCase__ ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
lowercase : int =ArgumentParser()
args_file_parser.add_argument(UpperCAmelCase__ , type=UpperCAmelCase__ , action='''append''' )
# Use only remaining args for further parsing (remove the args_file_flag)
lowercase : Any =args_file_parser.parse_known_args(args=UpperCAmelCase__ )
lowercase : str =vars(UpperCAmelCase__ ).get(args_file_flag.lstrip('''-''' ) , UpperCAmelCase__ )
if cmd_args_file_paths:
args_files.extend([Path(UpperCAmelCase__ ) for p in cmd_args_file_paths] )
lowercase : List[Any] =[]
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
lowercase : Optional[Any] =file_args + args if args is not None else file_args + sys.argv[1:]
lowercase : Union[str, Any] =self.parse_known_args(args=UpperCAmelCase__ )
lowercase : Union[str, Any] =[]
for dtype in self.dataclass_types:
lowercase : int ={f.name for f in dataclasses.fields(UpperCAmelCase__ ) if f.init}
lowercase : int ={k: v for k, v in vars(UpperCAmelCase__ ).items() if k in keys}
for k in keys:
delattr(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Union[str, Any] =dtype(**UpperCAmelCase__ )
outputs.append(UpperCAmelCase__ )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(UpperCAmelCase__ )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' )
return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False):
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False):
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False):
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
| 700 |
'''simple docstring'''
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Optional[int] ='''<s>'''
lowercase : int =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowercase : Optional[int] =self.get_tokenizer()
lowercase : Any =self.get_rust_tokenizer()
lowercase : int ='''I was born in 92000, and this is falsé.'''
lowercase : List[str] =tokenizer.tokenize(UpperCAmelCase__ )
lowercase : Dict =rust_tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : str =tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowercase : Union[str, Any] =rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Optional[Any] =self.get_rust_tokenizer()
lowercase : Optional[Any] =tokenizer.encode(UpperCAmelCase__ )
lowercase : Union[str, Any] =rust_tokenizer.encode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Tuple =BigBirdTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
lowercase : Tuple =tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [285, 46, 10, 170, 382] , )
lowercase : Tuple =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowercase : Any =tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
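        # Token id 0 is <unk>: "9" and "é" are not in the 1k-piece test vocab, so the
        # ids above round-trip back to "<unk>" in the decode check below.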
lowercase : List[Any] =tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : str ='''Hello World!'''
lowercase : Union[str, Any] =[65, 18536, 2260, 101, 66]
self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@slow
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : int =(
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
lowercase : Tuple =[65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@require_torch
@slow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
lowercase : List[str] =list(self.big_tokenizer.get_vocab().keys() )[:10]
lowercase : Dict =''' '''.join(UpperCAmelCase__ )
lowercase : Union[str, Any] =self.big_tokenizer.encode_plus(UpperCAmelCase__ , return_tensors='''pt''' , return_token_type_ids=UpperCAmelCase__ )
lowercase : Dict =self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=UpperCAmelCase__ )
lowercase : Optional[int] =BigBirdConfig(attention_type='''original_full''' )
lowercase : Dict =BigBirdModel(UpperCAmelCase__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**UpperCAmelCase__ )
model(**UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Union[str, Any] =BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
lowercase : Dict =tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
# fmt: off
lowercase : str ={'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 88 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
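    # Defer the heavy torch imports: attribute access resolves through _LazyModule on
    # first use, keeping `import transformers` fast when Falcon is not needed.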
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 534 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
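# Quick sanity check (a sketch of expected behavior, given the clamping to max_beta above):
#
#     betas = betas_for_alpha_bar(1000)
#     assert betas.shape == (1000,) and float(betas.max()) <= 0.999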
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    """Second-order K-DPM discrete scheduler (two model evaluations per denoising step)."""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
def __init__( self , __lowerCamelCase = 1_0_0_0 , __lowerCamelCase = 0.0_0085 , __lowerCamelCase = 0.012 , __lowerCamelCase = "linear" , __lowerCamelCase = None , __lowerCamelCase = "epsilon" , __lowerCamelCase = "linspace" , __lowerCamelCase = 0 , ) -> int:
if trained_betas is not None:
_SCREAMING_SNAKE_CASE : Dict = torch.tensor(__lowerCamelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
_SCREAMING_SNAKE_CASE : List[str] = torch.linspace(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_SCREAMING_SNAKE_CASE : Union[str, Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __lowerCamelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_SCREAMING_SNAKE_CASE : str = betas_for_alpha_bar(__lowerCamelCase )
else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
_SCREAMING_SNAKE_CASE : int = 1.0 - self.betas
_SCREAMING_SNAKE_CASE : str = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
    def index_for_timestep(self, timestep, schedule_timesteps=None):
if schedule_timesteps is None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.timesteps
_SCREAMING_SNAKE_CASE : Tuple = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_SCREAMING_SNAKE_CASE : str = 1 if len(__lowerCamelCase ) > 1 else 0
else:
_SCREAMING_SNAKE_CASE : Dict = timestep.cpu().item() if torch.is_tensor(__lowerCamelCase ) else timestep
_SCREAMING_SNAKE_CASE : Dict = self._index_counter[timestep_int]
return indices[pos].item()
@property
    def init_noise_sigma(self):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample: torch.FloatTensor, timestep) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)
if self.state_in_first_order:
_SCREAMING_SNAKE_CASE : List[str] = self.sigmas[step_index]
else:
_SCREAMING_SNAKE_CASE : List[Any] = self.sigmas_interpol[step_index]
_SCREAMING_SNAKE_CASE : Union[str, Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
    def set_timesteps(self, num_inference_steps: int, device=None, num_train_timesteps: Optional[int] = None):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_SCREAMING_SNAKE_CASE : str = np.linspace(0 , num_train_timesteps - 1 , __lowerCamelCase , dtype=__lowerCamelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_SCREAMING_SNAKE_CASE : int = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_SCREAMING_SNAKE_CASE : Optional[Any] = (np.arange(0 , __lowerCamelCase ) * step_ratio).round()[::-1].copy().astype(__lowerCamelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_SCREAMING_SNAKE_CASE : Union[str, Any] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_SCREAMING_SNAKE_CASE : Optional[Any] = (np.arange(__lowerCamelCase , 0 , -step_ratio )).round().copy().astype(__lowerCamelCase )
timesteps -= 1
else:
raise ValueError(
F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(np.log(__lowerCamelCase ) ).to(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = np.interp(__lowerCamelCase , np.arange(0 , len(__lowerCamelCase ) ) , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase )
# interpolate sigmas
_SCREAMING_SNAKE_CASE : str = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
_SCREAMING_SNAKE_CASE : List[Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
_SCREAMING_SNAKE_CASE : Tuple = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
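        # KDPM2 uses two model evaluations per step: the interleaving above pairs each sigma
        # with its log-space midpoint (sigmas_interpol), giving the first- and second-order
        # sub-steps of step() their own schedule entries.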
if str(__lowerCamelCase ).startswith("mps" ):
# mps does not support float64
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.from_numpy(__lowerCamelCase ).to(__lowerCamelCase , dtype=torch.floataa )
else:
_SCREAMING_SNAKE_CASE : str = torch.from_numpy(__lowerCamelCase ).to(__lowerCamelCase )
# interpolate timesteps
_SCREAMING_SNAKE_CASE : Optional[int] = self.sigma_to_t(__lowerCamelCase ).to(__lowerCamelCase , dtype=timesteps.dtype )
_SCREAMING_SNAKE_CASE : List[str] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
_SCREAMING_SNAKE_CASE : int = torch.cat([timesteps[:1], interleaved_timesteps] )
_SCREAMING_SNAKE_CASE : Union[str, Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_SCREAMING_SNAKE_CASE : Dict = defaultdict(__lowerCamelCase )
    def sigma_to_t(self, sigma):
# get log sigma
_SCREAMING_SNAKE_CASE : Optional[int] = sigma.log()
# get distribution
_SCREAMING_SNAKE_CASE : Union[str, Any] = log_sigma - self.log_sigmas[:, None]
# get sigmas range
_SCREAMING_SNAKE_CASE : Any = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
_SCREAMING_SNAKE_CASE : int = low_idx + 1
_SCREAMING_SNAKE_CASE : Optional[int] = self.log_sigmas[low_idx]
_SCREAMING_SNAKE_CASE : Optional[Any] = self.log_sigmas[high_idx]
# interpolate sigmas
_SCREAMING_SNAKE_CASE : List[str] = (low - log_sigma) / (low - high)
_SCREAMING_SNAKE_CASE : Optional[Any] = w.clamp(0 , 1 )
# transform interpolation to time range
_SCREAMING_SNAKE_CASE : List[str] = (1 - w) * low_idx + w * high_idx
_SCREAMING_SNAKE_CASE : List[str] = t.view(sigma.shape )
return t
@property
    def state_in_first_order(self):
return self.sample is None
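    # step() below alternates between the two phases: the first (Euler-like) sub-step
    # stores `self.sample`, and the second-order correction consumes and clears it.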
    def step(self, model_output, timestep, sample, return_dict: bool = True) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)
# advance index counter by 1
_SCREAMING_SNAKE_CASE : int = timestep.cpu().item() if torch.is_tensor(__lowerCamelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_SCREAMING_SNAKE_CASE : Tuple = self.sigmas[step_index]
_SCREAMING_SNAKE_CASE : Any = self.sigmas_interpol[step_index + 1]
_SCREAMING_SNAKE_CASE : Tuple = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_SCREAMING_SNAKE_CASE : Any = self.sigmas[step_index - 1]
_SCREAMING_SNAKE_CASE : Tuple = self.sigmas_interpol[step_index]
_SCREAMING_SNAKE_CASE : Any = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_SCREAMING_SNAKE_CASE : Optional[Any] = 0
_SCREAMING_SNAKE_CASE : int = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_SCREAMING_SNAKE_CASE : Tuple = sigma_hat if self.state_in_first_order else sigma_interpol
_SCREAMING_SNAKE_CASE : Optional[int] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_SCREAMING_SNAKE_CASE : List[str] = sigma_hat if self.state_in_first_order else sigma_interpol
_SCREAMING_SNAKE_CASE : str = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("prediction_type not implemented yet: sample" )
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_SCREAMING_SNAKE_CASE : Union[str, Any] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_SCREAMING_SNAKE_CASE : int = sigma_interpol - sigma_hat
# store for 2nd order step
_SCREAMING_SNAKE_CASE : Optional[int] = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_SCREAMING_SNAKE_CASE : List[str] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_SCREAMING_SNAKE_CASE : str = sigma_next - sigma_hat
_SCREAMING_SNAKE_CASE : Any = self.sample
_SCREAMING_SNAKE_CASE : List[Any] = None
_SCREAMING_SNAKE_CASE : int = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__lowerCamelCase )
    def add_noise(self, original_samples, noise, timesteps) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__lowerCamelCase ):
# mps does not support float64
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.timesteps.to(original_samples.device , dtype=torch.floataa )
_SCREAMING_SNAKE_CASE : Dict = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
_SCREAMING_SNAKE_CASE : List[str] = self.timesteps.to(original_samples.device )
_SCREAMING_SNAKE_CASE : Union[str, Any] = timesteps.to(original_samples.device )
_SCREAMING_SNAKE_CASE : List[Any] = [self.index_for_timestep(__lowerCamelCase , __lowerCamelCase ) for t in timesteps]
_SCREAMING_SNAKE_CASE : Optional[Any] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_SCREAMING_SNAKE_CASE : Optional[Any] = sigma.unsqueeze(-1 )
_SCREAMING_SNAKE_CASE : str = original_samples + noise * sigma
return noisy_samples
    def __len__(self) -> int:
        return self.config.num_train_timesteps
| 249 | 0 |
'''simple docstring'''
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result, args):
    """Compute WER/CER for the predictions and write them (and optionally all outputs) to disk."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metrics
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in a text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """Lower-case the target text and strip punctuation so WER compares spoken content only."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing newline characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
args = parser.parse_args()
main(args)
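# Example invocation (illustrative; the script name, model id and dataset id are assumptions):
#   python eval.py --model_id facebook/wav2vec2-base-960h \
#     --dataset mozilla-foundation/common_voice_7_0 --config en --split test --log_outputs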
| 179 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-classification/requirements.txt""")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to fine-tune from."""

    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
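# The collator stacks per-example tensors into batch tensors; the Trainer feeds its
# output directly to the model forward pass, so the dict keys must match the model's
# expected argument names.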
def main():
SCREAMING_SNAKE_CASE : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_image_classification' , a_ , a_ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : List[Any] = training_args.get_process_log_level()
logger.setLevel(a_ )
transformers.utils.logging.set_verbosity(a_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE : List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE : Union[str, Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
SCREAMING_SNAKE_CASE : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='image-classification' , use_auth_token=True if model_args.use_auth_token else None , )
else:
SCREAMING_SNAKE_CASE : Optional[int] = {}
if data_args.train_dir is not None:
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(data_args.train_dir , '**' )
if data_args.validation_dir is not None:
SCREAMING_SNAKE_CASE : int = os.path.join(data_args.validation_dir , '**' )
SCREAMING_SNAKE_CASE : Optional[Any] = load_dataset(
'imagefolder' , data_files=a_ , cache_dir=model_args.cache_dir , task='image-classification' , )
# If we don't have a validation split, split off a percentage of train as validation.
SCREAMING_SNAKE_CASE : Optional[int] = None if 'validation' in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , a_ ) and data_args.train_val_split > 0.0:
SCREAMING_SNAKE_CASE : Optional[int] = dataset['train'].train_test_split(data_args.train_val_split )
SCREAMING_SNAKE_CASE : List[Any] = split['train']
SCREAMING_SNAKE_CASE : Tuple = split['test']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
SCREAMING_SNAKE_CASE : List[Any] = dataset['train'].features['labels'].names
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = {}, {}
for i, label in enumerate(a_ ):
SCREAMING_SNAKE_CASE : List[str] = str(a_ )
SCREAMING_SNAKE_CASE : Dict = label
# Load the accuracy metric from the datasets package
SCREAMING_SNAKE_CASE : List[Any] = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(a_ ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
SCREAMING_SNAKE_CASE : Tuple = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(a_ ) , labelaid=a_ , idalabel=a_ , finetuning_task='image-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE : str = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=a_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
SCREAMING_SNAKE_CASE : List[str] = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor.size['shortest_edge']
else:
SCREAMING_SNAKE_CASE : Optional[int] = (image_processor.size['height'], image_processor.size['width'])
SCREAMING_SNAKE_CASE : Any = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
SCREAMING_SNAKE_CASE : List[Any] = Compose(
[
RandomResizedCrop(a_ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
SCREAMING_SNAKE_CASE : Dict = Compose(
[
Resize(a_ ),
CenterCrop(a_ ),
ToTensor(),
normalize,
] )
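    # Train-time transforms add augmentation (random resized crop + horizontal flip);
    # eval-time transforms are deterministic (resize + center crop) so metrics are reproducible.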
def train_transforms(a_ ):
SCREAMING_SNAKE_CASE : int = [
_train_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']
]
return example_batch
def val_transforms(a_ ):
SCREAMING_SNAKE_CASE : Any = [_val_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE : Optional[int] = (
dataset['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(a_ )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE : int = (
dataset['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(a_ )
    # Initialize our trainer
SCREAMING_SNAKE_CASE : Optional[Any] = Trainer(
model=a_ , args=a_ , train_dataset=dataset['train'] if training_args.do_train else None , eval_dataset=dataset['validation'] if training_args.do_eval else None , compute_metrics=a_ , tokenizer=a_ , data_collator=a_ , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE : int = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE : Tuple = last_checkpoint
SCREAMING_SNAKE_CASE : Optional[int] = trainer.train(resume_from_checkpoint=a_ )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
SCREAMING_SNAKE_CASE : List[Any] = trainer.evaluate()
trainer.log_metrics('eval' , a_ )
trainer.save_metrics('eval' , a_ )
# Write model card and (optionally) push to hub
SCREAMING_SNAKE_CASE : Optional[int] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'image-classification',
'dataset': data_args.dataset_name,
'tags': ['image-classification', 'vision'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**a_ )
else:
trainer.create_model_card(**a_ )
if __name__ == "__main__":
main()
| 179 | 1 |
'''simple docstring'''
def longest_common_substring(text1: str, text2: str) -> str:
    """Return the longest substring common to `text1` and `text2`, via dynamic programming."""
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")
    text1_length = len(text1)
    text2_length = len(text2)
    # dp[i][j] = length of the common suffix of text1[:i] and text2[:j]
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0
    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
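    # A quick check of expected behavior (illustrative; not from the original file):
    print(longest_common_substring("abcdef", "xbcdy"))  # -> "bcd"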
| 414 |
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config
def snake_case_ (self ):
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase__ )
def snake_case_ (self ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=lowerCAmelCase__ )
def snake_case_ (self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase__ )
def snake_case_ (self ):
for clip_sample_range in [1, 5, 1_0, 2_0]:
self.check_over_configs(clip_sample_range=lowerCAmelCase__ )
def snake_case_ (self ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=lowerCAmelCase__ )
def snake_case_ (self ):
for time_step in [0, 5_0_0, 9_9_9]:
for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=lowerCAmelCase__ , prev_timestep=lowerCAmelCase__ )
def snake_case_ (self ):
_UpperCAmelCase : int = self.scheduler_classes[0]
_UpperCAmelCase : Any = self.get_scheduler_config(variance_type="""fixed_small_log""" )
_UpperCAmelCase : Union[str, Any] = scheduler_class(**lowerCAmelCase__ )
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5
def snake_case_ (self ):
_UpperCAmelCase : Dict = self.scheduler_classes[0]
_UpperCAmelCase : Optional[Any] = self.get_scheduler_config(variance_type="""learned_range""" )
_UpperCAmelCase : str = scheduler_class(**lowerCAmelCase__ )
        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5
def snake_case_ (self ):
_UpperCAmelCase : List[Any] = self.scheduler_classes[0]
_UpperCAmelCase : str = self.get_scheduler_config()
_UpperCAmelCase : Optional[Any] = scheduler_class(**lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = scheduler.timesteps
_UpperCAmelCase : Optional[int] = self.dummy_model()
_UpperCAmelCase : List[Any] = self.dummy_sample_deter
_UpperCAmelCase : Dict = torch.manual_seed(0 )
for i, t in enumerate(lowerCAmelCase__ ):
# 1. predict noise residual
_UpperCAmelCase : str = model(lowerCAmelCase__ , lowerCAmelCase__ )
# 2. predict previous mean of sample x_t-1
_UpperCAmelCase : Union[str, Any] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ).prev_sample
_UpperCAmelCase : List[str] = pred_prev_sample
_UpperCAmelCase : List[str] = torch.sum(torch.abs(lowerCAmelCase__ ) )
_UpperCAmelCase : Dict = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 2_5_2.2_6_8_2_4_9_5 ) < 1e-2
assert abs(result_mean.item() - 0.3_2_8_4_7_4_3 ) < 1e-3
def snake_case_ (self ):
_UpperCAmelCase : int = self.scheduler_classes[0]
_UpperCAmelCase : Tuple = self.get_scheduler_config()
_UpperCAmelCase : List[Any] = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(2_5 )
_UpperCAmelCase : Optional[int] = scheduler.timesteps
_UpperCAmelCase : Optional[Any] = self.dummy_model()
_UpperCAmelCase : Dict = self.dummy_sample_deter
_UpperCAmelCase : List[str] = torch.manual_seed(0 )
for i, t in enumerate(lowerCAmelCase__ ):
# 1. predict noise residual
_UpperCAmelCase : Optional[Any] = model(lowerCAmelCase__ , lowerCAmelCase__ )
if i + 1 == timesteps.shape[0]:
_UpperCAmelCase : int = None
else:
_UpperCAmelCase : Any = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
_UpperCAmelCase : List[str] = scheduler.step(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , prev_timestep=lowerCAmelCase__ , generator=lowerCAmelCase__ ).prev_sample
_UpperCAmelCase : List[Any] = pred_prev_sample
_UpperCAmelCase : Tuple = torch.sum(torch.abs(lowerCAmelCase__ ) )
_UpperCAmelCase : List[str] = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 2_5_8.2_0_4_4_9_8_3 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_6_2_0_3_8 ) < 1e-3
def snake_case_ (self ):
pass
def snake_case_ (self ):
pass
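# A minimal standalone sketch of the denoising loop the tests above exercise.
# The noise "prediction" below is a placeholder, not a real UNet; only the
# scheduler API usage mirrors the tests.
#
#     import torch
#     from diffusers import UnCLIPScheduler
#
#     scheduler = UnCLIPScheduler(variance_type="fixed_small_log")
#     sample = torch.randn(1, 3, 32, 32)
#     for t in scheduler.timesteps:
#         residual = torch.zeros_like(sample)  # stand-in for model(sample, t)
#         sample = scheduler.step(residual, t, sample).prev_sample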
| 414 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 462 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
parser = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
    cluster = rh.cluster(
name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
)
else:
    cluster = rh.cluster(
name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 462 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = '''big_bird'''
    def __init__(self, vocab_size=50358, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=4096, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, sep_token_id=66, attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=64, num_random_blocks=3, classifier_dropout=None, **kwargs, ):
        """simple docstring"""
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs, )
__magic_name__ :Union[str, Any] = vocab_size
__magic_name__ :List[str] = max_position_embeddings
__magic_name__ :Dict = hidden_size
__magic_name__ :Union[str, Any] = num_hidden_layers
__magic_name__ :str = num_attention_heads
__magic_name__ :Any = intermediate_size
__magic_name__ :List[Any] = hidden_act
__magic_name__ :Any = hidden_dropout_prob
__magic_name__ :str = attention_probs_dropout_prob
__magic_name__ :Union[str, Any] = initializer_range
__magic_name__ :Optional[int] = type_vocab_size
__magic_name__ :Optional[Any] = layer_norm_eps
__magic_name__ :List[str] = use_cache
__magic_name__ :Optional[Any] = rescale_embeddings
__magic_name__ :Optional[Any] = attention_type
__magic_name__ :Optional[Any] = use_bias
__magic_name__ :Optional[int] = block_size
__magic_name__ :List[Any] = num_random_blocks
__magic_name__ :List[str] = classifier_dropout
class BigBirdOnnxConfig(OnnxConfig):
@property
def A ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
__magic_name__ :int = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__magic_name__ :Dict = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
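# Usage sketch (defaults mirror google/bigbird-roberta-base):
#
#     config = BigBirdConfig(attention_type="block_sparse", block_size=64)
#     config.attention_type = "original_full"  # exact attention fallback for short inputs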
| 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_encodec'] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
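# _LazyModule defers every import declared in _import_structure until the
# attribute is actually accessed, so importing the package itself stays cheap:
#
#     from transformers.models.encodec import EncodecModel  # triggers the real import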
| 527 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class a ( metaclass=DummyObject ):
    _backends = ["""torch""", """scipy"""]
def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ):
requires_backends(self , ['torch', 'scipy'] )
@classmethod
def __snake_case ( cls , *UpperCamelCase_ , **UpperCamelCase_ ):
requires_backends(cls , ['torch', 'scipy'] )
@classmethod
def __snake_case ( cls , *UpperCamelCase_ , **UpperCamelCase_ ):
requires_backends(cls , ['torch', 'scipy'] )
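# Instantiating the placeholder above, or calling its classmethods (typically
# from_config / from_pretrained), raises an ImportError that names the missing
# backends, e.g. "... requires the torch and scipy libraries ...".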
| 254 |
"""simple docstring"""
def power(base: int, exponent: int) -> float:
    return base * power(base, (exponent - 1)) if exponent else 1
if __name__ == "__main__":
print('Raise base to the power of exponent using recursion...')
    base = int(input('Enter the base: ').strip())
    exponent = int(input('Enter the exponent: ').strip())
    result = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
        result = 1 / result
print(f'{base} to the power of {exponent} is {result}')
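# Worked example of the recursion above: power(2, 5) = 2 * power(2, 4) = ... = 32,
# and power(3, 0) = 1 (the `if exponent else 1` base case). For exponent = -2
# the main block computes power(base, 2) and then inverts the result.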
| 254 | 1 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, mask_ratio=0.6, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
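        # numeric check with the defaults above: image_size=30, patch_size=2
        #   num_patches = (30 // 2) ** 2 = 225
        #   seq_length  = ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91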
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
return config, pixel_values, labels
    def get_config(self):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    def create_and_check_model(self, config, pixel_values, labels):
a_ : List[str] = TFViTMAEModel(config=a_ )
a_ : List[Any] = model(a_ , training=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
a_ : Optional[int] = TFViTMAEForPreTraining(a_ )
a_ : Optional[Any] = model(a_ , training=a_ )
# expected sequence length = num_patches
a_ : Dict = (self.image_size // self.patch_size) ** 2
a_ : Dict = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
a_ : int = 1
a_ : str = TFViTMAEForPreTraining(a_ )
a_ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a_ : Optional[int] = model(a_ , training=a_ )
a_ : int = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    def prepare_config_and_inputs_for_common(self):
a_ : Tuple = self.prepare_config_and_inputs()
((a_) , (a_) , (a_)) : Dict = config_and_inputs
a_ : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
__lowerCAmelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
__lowerCAmelCase = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def snake_case_ ( self ):
a_ : List[Any] = TFViTMAEModelTester(self )
a_ : List[str] = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=3_7 )
def snake_case_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def snake_case_ ( self ):
pass
def snake_case_ ( self ):
a_ , a_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : int = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
a_ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_ , tf.keras.layers.Layer ) )
def snake_case_ ( self ):
a_ , a_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : List[str] = model_class(a_ )
a_ : Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ : Optional[int] = [*signature.parameters.keys()]
a_ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a_ )
def snake_case_ ( self ):
a_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def snake_case_ ( self ):
a_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a_ )
def snake_case_ ( self ):
# make the mask reproducible
np.random.seed(2 )
a_ , a_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
a_ : str = int((config.image_size // config.patch_size) ** 2 )
a_ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
a_ : Optional[int] = model_class(a_ )
a_ : int = self._prepare_for_class(a_ , a_ )
a_ : Any = model(a_ , noise=a_ )
a_ : int = copy.deepcopy(self._prepare_for_class(a_ , a_ ) )
a_ : Any = model(**a_ , noise=a_ )
a_ : Union[str, Any] = outputs_dict[0].numpy()
a_ : Any = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def snake_case_ ( self ):
# make the mask reproducible
np.random.seed(2 )
a_ , a_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
a_ : List[str] = int((config.image_size // config.patch_size) ** 2 )
a_ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(a_ ):
a_ : Union[str, Any] = {}
for k, v in inputs_dict.items():
if tf.is_tensor(a_ ):
a_ : Optional[int] = v.numpy()
else:
a_ : List[Any] = np.array(a_ )
return inputs_np_dict
for model_class in self.all_model_classes:
a_ : List[str] = model_class(a_ )
a_ : Optional[Any] = self._prepare_for_class(a_ , a_ )
a_ : Dict = prepare_numpy_arrays(a_ )
a_ : Optional[int] = model(a_ , noise=a_ )
a_ : Optional[int] = model(**a_ , noise=a_ )
self.assert_outputs_same(a_ , a_ )
    def check_pt_tf_models(self, tf_model, pt_model, inputs_dict):
# make masks reproducible
np.random.seed(2 )
a_ : str = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
a_ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
a_ : Tuple = tf.constant(a_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
a_ : Any = tf_noise
super().check_pt_tf_models(a_ , a_ , a_ )
def snake_case_ ( self ):
# make mask reproducible
np.random.seed(2 )
a_ , a_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
a_ : Optional[int] = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(a_ )
if module_member_name.endswith("MainLayer" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("MainLayer" )] == model_class.__name__[: -len("Model" )]
for module_member in (getattr(a_ , a_ ),)
if isinstance(a_ , a_ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(a_ , "_keras_serializable" , a_ )
}
a_ : int = int((config.image_size // config.patch_size) ** 2 )
a_ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
a_ : Dict = tf.convert_to_tensor(a_ )
inputs_dict.update({"noise": noise} )
for main_layer_class in tf_main_layer_classes:
a_ : Dict = main_layer_class(a_ )
a_ : int = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
a_ : str = tf.keras.Model(a_ , outputs=main_layer(a_ ) )
a_ : Tuple = model(a_ )
with tempfile.TemporaryDirectory() as tmpdirname:
a_ : Optional[int] = os.path.join(a_ , "keras_model.h5" )
model.save(a_ )
a_ : List[Any] = tf.keras.models.load_model(
a_ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(a_ , tf.keras.Model )
a_ : int = model(a_ )
self.assert_outputs_same(a_ , a_ )
@slow
def snake_case_ ( self ):
# make mask reproducible
np.random.seed(2 )
a_ , a_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
a_ : Any = int((config.image_size // config.patch_size) ** 2 )
a_ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
a_ : Optional[Any] = model_class(a_ )
a_ : List[Any] = self._prepare_for_class(a_ , a_ )
a_ : Any = model(a_ , noise=a_ )
if model_class.__name__ == "TFViTMAEModel":
a_ : Optional[int] = outputs.last_hidden_state.numpy()
a_ : List[str] = 0
else:
a_ : List[Any] = outputs.logits.numpy()
a_ : List[Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a_ , saved_model=a_ )
a_ : Union[str, Any] = model_class.from_pretrained(a_ )
a_ : Dict = model(a_ , noise=a_ )
if model_class.__name__ == "TFViTMAEModel":
a_ : Optional[int] = after_outputs["last_hidden_state"].numpy()
a_ : str = 0
else:
a_ : Union[str, Any] = after_outputs["logits"].numpy()
a_ : str = 0
a_ : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a_ , 1e-5 )
def snake_case_ ( self ):
# make mask reproducible
np.random.seed(2 )
a_ , a_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
a_ : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
a_ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
a_ : str = model_class(a_ )
a_ : int = self._prepare_for_class(a_ , a_ )
a_ : Optional[Any] = model(a_ , noise=a_ )
a_ : Optional[int] = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(a_ )
a_ : Any = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
a_ : Dict = model_class.from_config(model.config )
a_ : List[str] = new_model(a_ ) # Build model
new_model.set_weights(model.get_weights() )
a_ : List[Any] = new_model(a_ , noise=a_ )
self.assert_outputs_same(a_ , a_ )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def snake_case_ ( self ):
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def snake_case_ ( self ):
pass
@slow
def snake_case_ ( self ):
a_ : int = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(a_ )
def prepare_img():
a_ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class snake_case_ ( unittest.TestCase ):
@cached_property
def snake_case_ ( self ):
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def snake_case_ ( self ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
a_ : Union[str, Any] = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" )
a_ : List[str] = self.default_image_processor
a_ : Optional[int] = prepare_img()
a_ : Tuple = image_processor(images=a_ , return_tensors="tf" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
a_ : List[str] = ViTMAEConfig()
a_ : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
a_ : str = np.random.uniform(size=(1, num_patches) )
# forward pass
a_ : int = model(**a_ , noise=a_ )
# verify the logits
a_ : str = tf.convert_to_tensor([1, 1_9_6, 7_6_8] )
self.assertEqual(outputs.logits.shape , a_ )
a_ : List[Any] = tf.convert_to_tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
        tf.debugging.assert_near(outputs.logits[0, :3, :3] , a_ , atol=1e-4 )
| 237 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
SCREAMING_SNAKE_CASE_ = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ["""DPTFeatureExtractor"""]
SCREAMING_SNAKE_CASE_ = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_dpt"""] = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 237 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
"""simple docstring"""
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
return config, pixel_values
    def get_config(self):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model(self, config, pixel_values):
lowercase_ : str = FlaxRegNetModel(config=SCREAMING_SNAKE_CASE_ )
lowercase_ : List[str] = model(SCREAMING_SNAKE_CASE_ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification(self, config, pixel_values):
lowercase_ : List[str] = self.num_labels
lowercase_ : Optional[int] = FlaxRegNetForImageClassification(config=SCREAMING_SNAKE_CASE_ )
lowercase_ : List[Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
lowercase_ : List[Any] = self.prepare_config_and_inputs()
lowercase_ ,lowercase_ : Any = config_and_inputs
lowercase_ : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
"""simple docstring"""
A : Any = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
A : Dict = False
A : str = False
A : Union[str, Any] = False
def _lowerCamelCase (self ) -> Optional[int]:
lowercase_ : Tuple = FlaxRegNetModelTester(self )
lowercase_ : Tuple = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ )
def _lowerCamelCase (self ) -> List[str]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase (self ) -> Dict:
return
def _lowerCamelCase (self ) -> str:
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def _lowerCamelCase (self ) -> Dict:
lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def _lowerCamelCase (self ) -> Tuple:
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def _lowerCamelCase (self ) -> Union[str, Any]:
pass
def _lowerCamelCase (self ) -> List[str]:
lowercase_ ,lowercase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[str] = model_class(SCREAMING_SNAKE_CASE_ )
lowercase_ : int = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : int = [*signature.parameters.keys()]
lowercase_ : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def _lowerCamelCase (self ) -> Optional[Any]:
def check_hidden_states_output(_a , _a , _a ):
lowercase_ : Optional[int] = model_class(SCREAMING_SNAKE_CASE_ )
lowercase_ : str = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowercase_ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase_ : Dict = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , expected_num_stages + 1 )
lowercase_ ,lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : int = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Dict = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowerCamelCase (self ) -> Union[str, Any]:
lowercase_ ,lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase_ : Dict = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ : Dict = model_class(SCREAMING_SNAKE_CASE_ )
@jax.jit
            def model_jitted(pixel_values, **kwargs):
                return model(pixel_values=pixel_values, **kwargs)
with self.subTest('JIT Enabled' ):
lowercase_ : List[str] = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
lowercase_ : Dict = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
for jitted_output, output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertEqual(jitted_output.shape , output.shape )
def prepare_img():
lowercase_ : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowerCamelCase (self ) -> List[Any]:
return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None
@slow
def _lowerCamelCase (self ) -> int:
lowercase_ : Optional[int] = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )
lowercase_ : List[str] = self.default_image_processor
lowercase_ : Optional[int] = prepare_img()
lowercase_ : List[str] = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='np' )
lowercase_ : Any = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
lowercase_ : Tuple = (1, 1_000)
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
lowercase_ : Tuple = jnp.array([-0.41_80, -1.50_51, -3.48_36] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
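# Standalone sketch of the jit / disable_jit pattern the JIT test above relies on:
#
#     import jax
#     import jax.numpy as jnp
#
#     f = jax.jit(lambda x: (x * 2).sum())
#     f(jnp.ones((2, 3)))                 # compiled on first call, cached after
#     with jax.disable_jit():
#         f(jnp.ones((2, 3)))             # runs eagerly, handy for debugging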
| 720 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
lowercase_ : List[Any] = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
lowercase_ : List[Any] = 128
elif "12-12" in model_name:
lowercase_ : Tuple = 12
lowercase_ : List[Any] = 12
elif "14-14" in model_name:
lowercase_ : List[str] = 14
lowercase_ : Optional[Any] = 14
elif "16-16" in model_name:
lowercase_ : Union[str, Any] = 16
lowercase_ : List[str] = 16
else:
raise ValueError('Model not supported' )
lowercase_ : Optional[Any] = 'huggingface/label-files'
if "speech-commands" in model_name:
lowercase_ : List[str] = 35
lowercase_ : int = 'speech-commands-v2-id2label.json'
else:
lowercase_ : Union[str, Any] = 527
lowercase_ : int = 'audioset-id2label.json'
lowercase_ : Union[str, Any] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='dataset' ) , 'r' ) )
lowercase_ : Union[str, Any] = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
lowercase_ : Optional[int] = idalabel
lowercase_ : Optional[int] = {v: k for k, v in idalabel.items()}
return config
def rename_key(name):
if "module.v" in name:
lowercase_ : Dict = name.replace('module.v' , 'audio_spectrogram_transformer' )
if "cls_token" in name:
lowercase_ : Optional[Any] = name.replace('cls_token' , 'embeddings.cls_token' )
if "dist_token" in name:
lowercase_ : Any = name.replace('dist_token' , 'embeddings.distillation_token' )
if "pos_embed" in name:
lowercase_ : List[str] = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
lowercase_ : int = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
# transformer blocks
if "blocks" in name:
lowercase_ : Optional[Any] = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
lowercase_ : Optional[int] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
lowercase_ : Dict = name.replace('attn' , 'attention.self' )
if "norm1" in name:
lowercase_ : int = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
lowercase_ : Optional[int] = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
lowercase_ : Optional[int] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
lowercase_ : int = name.replace('mlp.fc2' , 'output.dense' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
lowercase_ : int = name.replace('audio_spectrogram_transformer.norm' , 'audio_spectrogram_transformer.layernorm' )
# classifier head
if "module.mlp_head.0" in name:
lowercase_ : Dict = name.replace('module.mlp_head.0' , 'classifier.layernorm' )
if "module.mlp_head.1" in name:
lowercase_ : List[Any] = name.replace('module.mlp_head.1' , 'classifier.dense' )
return name
def convert_state_dict(orig_state_dict, config):
for key in orig_state_dict.copy().keys():
lowercase_ : List[str] = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if "qkv" in key:
lowercase_ : List[str] = key.split('.' )
lowercase_ : int = int(key_split[3] )
lowercase_ : Tuple = config.hidden_size
if "weight" in key:
lowercase_ : Tuple = val[:dim, :]
lowercase_ : Union[str, Any] = val[dim : dim * 2, :]
lowercase_ : Optional[int] = val[-dim:, :]
else:
lowercase_ : Optional[Any] = val[:dim]
lowercase_ : Any = val[dim : dim * 2]
lowercase_ : Tuple = val[-dim:]
else:
lowercase_ : Optional[Any] = val
return orig_state_dict
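# The "qkv" branch above unpacks a fused attention projection of shape
# (3 * hidden_size, hidden_size) into separate query / key / value tensors:
# rows [:dim] become the query, [dim : 2 * dim] the key, and [-dim:] the value.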
def remove_keys(state_dict):
lowercase_ : List[Any] = [
'module.v.head.weight',
'module.v.head.bias',
'module.v.head_dist.weight',
'module.v.head_dist.bias',
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
lowercase_ : Dict = get_audio_spectrogram_transformer_config(SCREAMING_SNAKE_CASE_ )
lowercase_ : Optional[int] = {
'ast-finetuned-audioset-10-10-0.4593': (
'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.450': (
'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448': (
'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448-v2': (
'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'
),
'ast-finetuned-audioset-12-12-0.447': (
'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'
),
'ast-finetuned-audioset-14-14-0.443': (
'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'
),
'ast-finetuned-audioset-16-16-0.442': (
'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'
),
'ast-finetuned-speech-commands-v2': (
'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'
),
}
# load original state_dict
lowercase_ : Dict = model_name_to_url[model_name]
lowercase_ : Optional[Any] = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location='cpu' )
# remove some keys
remove_keys(SCREAMING_SNAKE_CASE_ )
# rename some keys
lowercase_ : str = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load 🤗 model
lowercase_ : Optional[Any] = ASTForAudioClassification(SCREAMING_SNAKE_CASE_ )
model.eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
lowercase_ : Tuple = -4.267_7393 if 'speech-commands' not in model_name else -6.84_5978
lowercase_ : str = 4.568_9974 if 'speech-commands' not in model_name else 5.565_4526
lowercase_ : str = 1_024 if 'speech-commands' not in model_name else 128
lowercase_ : Dict = ASTFeatureExtractor(mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
if "speech-commands" in model_name:
lowercase_ : Optional[Any] = load_dataset('speech_commands' , 'v0.02' , split='validation' )
lowercase_ : Any = dataset[0]['audio']['array']
else:
lowercase_ : Any = hf_hub_download(
repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' , )
lowercase_ ,lowercase_ : Union[str, Any] = torchaudio.load(SCREAMING_SNAKE_CASE_ )
lowercase_ : str = waveform.squeeze().numpy()
lowercase_ : str = feature_extractor(SCREAMING_SNAKE_CASE_ , sampling_rate=16_000 , return_tensors='pt' )
# forward pass
lowercase_ : Tuple = model(**SCREAMING_SNAKE_CASE_ )
lowercase_ : Tuple = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
lowercase_ : int = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
lowercase_ : Optional[int] = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
lowercase_ : Optional[Any] = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
lowercase_ : List[str] = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
lowercase_ : List[str] = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
lowercase_ : Any = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
lowercase_ : List[str] = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
lowercase_ : Optional[Any] = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError('Unknown model name' )
if not torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ):
raise ValueError('Logits don\'t match' )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(f'''Saving feature extractor to {pytorch_dump_folder_path}''' )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
print('Pushing model and feature extractor to the hub...' )
model.push_to_hub(f'''MIT/{model_name}''' )
feature_extractor.push_to_hub(f'''MIT/{model_name}''' )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='ast-finetuned-audioset-10-10-0.4593',
type=str,
help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_A = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 438 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
"""tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["""RobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_roberta"""] = [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_roberta"""] = [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_roberta"""] = [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 2 |
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_50_00_00) -> int:
    """
    Return how many perimeters below or equal to `limit` can be formed by
    exactly one integer-sided right triangle (via Euclid's formula).
    """
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
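# Worked example of Euclid's formula used above: m=2, n=1 gives the primitive
# triple (m**2 - n**2, 2*m*n, m**2 + n**2) = (3, 4, 5) with perimeter
# 12 = 2*m*(m + n); the inner loop then also counts its multiples 24, 36, ...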
if __name__ == "__main__":
    print(F'''{solution() = }''')
| 241 | 0 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    """simple docstring"""

    CHARACTER = '''char'''
    BPE = '''bpe'''
    WORDPIECE = '''wp'''
_UpperCamelCase: Union[str, Any] =(DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ['''image_processor''', '''char_tokenizer''']
    image_processor_class = '''ViTImageProcessor'''
    char_tokenizer_class = '''MgpstrTokenizer'''
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained('gpt2' )
        self.wp_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased' )
        super().__init__(image_processor , tokenizer )
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.' )
        if images is not None:
            inputs = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None:
            encodings = self.char_tokenizer(text , return_tensors=return_tensors , **kwargs )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs
    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0 )
        char_strs, char_scores = self._decode_helper(char_preds , 'char' )
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds , 'bpe' )
        wp_strs, wp_scores = self._decode_helper(wp_preds , 'wp' )
        final_strs = []
        final_scores = []
        for i in range(batch_size ):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )
        out = {}
        out['generated_text'] = final_strs
        out['scores'] = final_scores
        out['char_preds'] = char_strs
        out['bpe_preds'] = bpe_strs
        out['wp_preds'] = wp_strs
        return out
    def _decode_helper(self, pred_logits, format):
if format == DecodeType.CHARACTER:
_lowerCAmelCase = self.char_decode
_lowerCAmelCase = 1
_lowerCAmelCase = '[s]'
elif format == DecodeType.BPE:
_lowerCAmelCase = self.bpe_decode
_lowerCAmelCase = 2
_lowerCAmelCase = '#'
elif format == DecodeType.WORDPIECE:
_lowerCAmelCase = self.wp_decode
_lowerCAmelCase = 102
_lowerCAmelCase = '[SEP]'
else:
raise ValueError(F'''Format {format} is not supported.''' )
_lowerCAmelCase , _lowerCAmelCase = [], []
_lowerCAmelCase = pred_logits.size(0 )
_lowerCAmelCase = pred_logits.size(1 )
_lowerCAmelCase , _lowerCAmelCase = pred_logits.topk(1 , dim=-1 , largest=_lowerCAmelCase , sorted=_lowerCAmelCase )
_lowerCAmelCase = preds_index.view(-1 , _lowerCAmelCase )[:, 1:]
_lowerCAmelCase = decoder(_lowerCAmelCase )
_lowerCAmelCase , _lowerCAmelCase = torch.nn.functional.softmax(_lowerCAmelCase , dim=2 ).max(dim=2 )
_lowerCAmelCase = preds_max_prob[:, 1:]
for index in range(_lowerCAmelCase ):
_lowerCAmelCase = preds_str[index].find(_lowerCAmelCase )
_lowerCAmelCase = preds_str[index][:pred_eos]
_lowerCAmelCase = preds_index[index].cpu().tolist()
_lowerCAmelCase = pred_index.index(_lowerCAmelCase ) if eos_token in pred_index else -1
_lowerCAmelCase = preds_max_prob[index][: pred_eos_index + 1]
_lowerCAmelCase = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(_lowerCAmelCase )
conf_scores.append(_lowerCAmelCase )
return dec_strs, conf_scores
    def char_decode(self, sequences):
        decode_strs = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(sequences )]
        return decode_strs
    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences )
    def wp_decode(self, sequences):
        decode_strs = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(sequences )]
        return decode_strs
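# Usage sketch (checkpoint name and `model` are illustrative; requires torch):
#
#     processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#     inputs = processor(images=image, return_tensors="pt")
#     outputs = model(**inputs)        # an MGP-STR scene-text recognition model
#     text = processor.batch_decode(outputs.logits)["generated_text"]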
| 585 |
import requests
from bs4 import BeautifulSoup
def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """
    Scrape the current worldwide COVID-19 statistics from worldometers.
    """
    soup = BeautifulSoup(requests.get(url).text , 'html.parser' )
    keys = soup.findAll('h1' )
    values = soup.findAll('div' , {'class': 'maincounter-number'} )
    keys += soup.findAll('span' , {'class': 'panel-title'} )
    values += soup.findAll('div' , {'class': 'number-table-main'} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
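# The returned mapping pairs each headline with its counter; the exact values
# change constantly, e.g.:
#     {'Coronavirus Cases:': '...', 'Deaths:': '...', 'Recovered:': '...'}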
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
    for key, value in world_covid19_stats().items():
print(F"{key}\n{value}\n")
| 585 | 1 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCamelCase__ : Optional[int] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/blenderbot_small-90M": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
def __init__( self , A__=None , A__=None , A__="<|endoftext|>" , A__="<|endoftext|>" , A__="<|endoftext|>" , A__=False , A__=True , **A__ , ) -> List[str]:
super().__init__(
ByteLevelBPETokenizer(
vocab=SCREAMING_SNAKE_CASE_ , merges=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ , ) , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
_SCREAMING_SNAKE_CASE = add_prefix_space
def UpperCamelCase ( self , A__ , A__=None ) -> Dict:
_SCREAMING_SNAKE_CASE = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCamelCase ( self , A__ , A__ = None ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = [self.sep_token_id]
_SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
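# Usage sketch (downloads the checkpoint referenced in the maps above):
#
#     tokenizer = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
#     ids = tokenizer("sam spade", "maltese falcon")["input_ids"]
#     # build_inputs_with_special_tokens wraps the sequences in bos/eos ids as defined above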
| 591 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """simple docstring"""

    order = 1
@register_to_config
    def __init__(self, num_train_timesteps = 1000, trained_betas = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []
    def set_timesteps(self, num_inference_steps, device = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
        steps = torch.cat([steps, torch.tensor([0.0] )] )
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
        else:
            self.betas = torch.sin(steps * math.pi / 2 ) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device )
        self.ets = []
    def step(self, model_output, timestep, sample, return_dict = True, ):
        if self.num_inference_steps is None:
            raise ValueError(
                """Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets )
        if len(self.ets ) == 1:
            ets = self.ets[-1]
        elif len(self.ets ) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets ) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample , timestep_index , prev_timestep_index , ets )
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
return sample
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = self.alphas[timestep_index]
UpperCamelCase__ = self.betas[timestep_index]
UpperCamelCase__ = self.alphas[prev_timestep_index]
UpperCamelCase__ = self.betas[prev_timestep_index]
UpperCamelCase__ = (sample - sigma * ets) / max(SCREAMING_SNAKE_CASE_ , 1E-8 )
UpperCamelCase__ = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__(self ):
return self.config.num_train_timesteps
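# Illustrative sketch (added for clarity, not in the original file): drives the
# scheduler with a stand-in "model" that returns zeros, just to show the
# set_timesteps / step call order. A real pipeline would use a trained UNet here.
if __name__ == "__main__":
    scheduler = IPNDMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(num_inference_steps=10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        model_output = torch.zeros_like(sample)  # placeholder for a denoising model
        sample = scheduler.step(model_output, t, sample).prev_sample
    print(sample.shape)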
| 513 | 0 |
"""simple docstring"""
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f'{self.real}+'
            f'{"+".join(str(dual) + "E" + str(n + 1) for n, dual in enumerate(self.duals))}'
        )
    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)
    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__
    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__
    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError('power must be a positive integer')
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x
def differentiate(func, position, order):
    if not callable(func):
        raise ValueError('differentiate() requires a function as input for func')
    if not isinstance(position, (float, int)):
        raise ValueError('differentiate() requires a float as input for position')
    if not isinstance(order, int):
        raise ValueError('differentiate() requires an int as input for order')
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
import doctest
doctest.testmod()
    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
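    # Quick sanity check (added, not in the original): d/dy of y**6 is 6*y**5,
    # so differentiate(f, 2, 1) should print 6 * 2**5 = 192.
    print(differentiate(f, 2, 1))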
| 713 |
"""simple docstring"""
import argparse
import struct
import unittest
class SHA256:
    def __init__(self, data):
        self.data = data
# Initialize hash values
        self.hashes = [
0x6a_09_e6_67,
0xbb_67_ae_85,
0x3c_6e_f3_72,
0xa5_4f_f5_3a,
0x51_0e_52_7f,
0x9b_05_68_8c,
0x1f_83_d9_ab,
0x5b_e0_cd_19,
]
# Initialize round constants
        self.round_constants = [
0x42_8a_2f_98,
0x71_37_44_91,
0xb5_c0_fb_cf,
0xe9_b5_db_a5,
0x39_56_c2_5b,
0x59_f1_11_f1,
0x92_3f_82_a4,
0xab_1c_5e_d5,
0xd8_07_aa_98,
0x12_83_5b_01,
0x24_31_85_be,
0x55_0c_7d_c3,
0x72_be_5d_74,
0x80_de_b1_fe,
0x9b_dc_06_a7,
0xc1_9b_f1_74,
0xe4_9b_69_c1,
0xef_be_47_86,
0x0f_c1_9d_c6,
0x24_0c_a1_cc,
0x2d_e9_2c_6f,
0x4a_74_84_aa,
0x5c_b0_a9_dc,
0x76_f9_88_da,
0x98_3e_51_52,
0xa8_31_c6_6d,
0xb0_03_27_c8,
0xbf_59_7f_c7,
0xc6_e0_0b_f3,
0xd5_a7_91_47,
0x06_ca_63_51,
0x14_29_29_67,
0x27_b7_0a_85,
0x2e_1b_21_38,
0x4d_2c_6d_fc,
0x53_38_0d_13,
0x65_0a_73_54,
0x76_6a_0a_bb,
0x81_c2_c9_2e,
0x92_72_2c_85,
0xa2_bf_e8_a1,
0xa8_1a_66_4b,
0xc2_4b_8b_70,
0xc7_6c_51_a3,
0xd1_92_e8_19,
0xd6_99_06_24,
0xf4_0e_35_85,
0x10_6a_a0_70,
0x19_a4_c1_16,
0x1e_37_6c_08,
0x27_48_77_4c,
0x34_b0_bc_b5,
0x39_1c_0c_b3,
0x4e_d8_aa_4a,
0x5b_9c_ca_4f,
0x68_2e_6f_f3,
0x74_8f_82_ee,
0x78_a5_63_6f,
0x84_c8_78_14,
0x8c_c7_02_08,
0x90_be_ff_fa,
0xa4_50_6c_eb,
0xbe_f9_a3_f7,
0xc6_71_78_f2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()
    @staticmethod
    def preprocessing(data):
        # pad with a single 1-bit, then zeros, then the 64-bit message length
        padding = b'\x80' + (b'\x00' * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack('>Q', (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self):
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack('>16L', block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    sa = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    sb = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + sa + words[index - 7] + sb
                    ) % 0x1_00_00_00_00
                # Compression
                sa = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xff_ff_ff_ff) & g)
                tempa = (
                    h + sa + ch + self.round_constants[index] + words[index]
                ) % 0x1_00_00_00_00
                sb = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                tempb = (sb + maj) % 0x1_00_00_00_00
                a, b, c, d, e, f, g, h = (
                    ((tempa + tempb) % 0x1_00_00_00_00),
                    a,
                    b,
                    c,
                    ((d + tempa) % 0x1_00_00_00_00),
                    e,
                    f,
                    g,
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = ''.join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value, rotations):
        return 0xff_ff_ff_ff & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self):
        import hashlib

        msg = bytes('Test String', 'utf-8')
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-s', '--string', dest='input_string', default='Hello World!! Welcome to Cryptography', help='Hash the string', )
    parser.add_argument(
        '-f', '--file', dest='input_file', help='Hash contents of a file')
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, 'rb') as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, 'utf-8')
    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
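# Added cross-check (not in the original file): the pure-Python digest should
# agree with hashlib on any input, e.g.
#
#     import hashlib
#     assert SHA256(b'abc').hash == hashlib.sha256(b'abc').hexdigest()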
| 293 | 0 |
import math
def solution(n: int = 100):
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F"{solution() = }")
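# Worked example (added for clarity): for n = 10 the square of the sum is
# 55**2 = 3025 and the sum of the squares is 385, so solution(10) == 2640.
assert solution(10) == 2640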
| 47 |
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
'constant': get_constant_schedule,
'constant_w_warmup': get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f' {self.model.__class__}'
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'
                ' padding..')
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ['bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [
                {
                    'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    'weight_decay': self.args.weight_decay,
                },
                {
                    'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    'weight_decay': 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {'scale_parameter': False, 'relative_step': False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    'betas': (self.args.adam_beta1, self.args.adam_beta2),
                    'eps': self.args.adam_epsilon,
                }
            optimizer_kwargs['lr'] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters, optim=optimizer_cls, **optimizer_kwargs, )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.')
    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps)
        return scheduler
    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size, distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED), )
            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits
    def compute_loss(self, model, inputs):
        labels = inputs.pop('labels')
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)
        gen_kwargs = {
            'max_length': self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs['input_ids'], attention_mask=inputs['attention_mask'], **gen_kwargs, )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs['max_length'])
        labels = inputs.pop('labels')
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs['max_length'])
        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
                f' padded to `max_length`={max_length}')
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device)
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
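# Hypothetical wiring sketch (added; not in the original file) showing how this
# trainer is typically constructed. `model`, `training_args`, `data_args`, and
# the datasets are assumed to exist elsewhere in a training script.
#
#     trainer = Seq2SeqTrainer(
#         config=model.config,
#         data_args=data_args,
#         model=model,
#         args=training_args,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,
#     )
#     trainer.train()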
| 94 | 0 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
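# Example invocation (added for clarity; all paths are placeholders):
#
#     python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/xlnet_model.ckpt \
#         --xlnet_config_file /path/to/xlnet_config.json \
#         --pytorch_dump_folder_path /path/to/output \
#         --finetuning_task sst-2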
| 711 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_resnet'] = [
        'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ResNetForImageClassification',
        'ResNetModel',
        'ResNetPreTrainedModel',
        'ResNetBackbone',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_resnet'] = [
        'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFResNetForImageClassification',
        'TFResNetModel',
        'TFResNetPreTrainedModel',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_resnet'] = [
        'FlaxResNetForImageClassification',
        'FlaxResNetModel',
        'FlaxResNetPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 72 | 0 |
import random
def rabin_miller(num: int) -> bool:
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num(num: int) -> bool:
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
print(('Prime number:', num))
print(('is_prime_low_num:', is_prime_low_num(num)))
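    # Quick check (added, not in the original): a couple of known values.
    assert is_prime_low_num(97) and not is_prime_low_num(100)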
| 592 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_ckpt', type=str, default='microsoft/unixcoder-base-nine')
    parser.add_argument('--num_epochs', type=int, default=5)
    parser.add_argument('--batch_size', type=int, default=6)
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1)
    parser.add_argument('--freeze', type=bool, default=True)
    parser.add_argument('--learning_rate', type=float, default=5e-4)
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--lr_scheduler_type', type=str, default='cosine')
    parser.add_argument('--num_warmup_steps', type=int, default=10)
    parser.add_argument('--weight_decay', type=float, default=0.01)
    parser.add_argument('--output_dir', type=str, default='./results')
    return parser.parse_args()
metric = load('accuracy')
def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
class CustomCallback(TrainerCallback):
    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix='train')
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed)
    dataset = load_dataset('codeparrot/codecomplex', split='train')
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test['test'].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            'train': train_test['train'],
            'test': test_validation['train'],
            'valid': test_validation['test'],
        })
    print('Loading tokenizer and model')
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id
    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False
    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation['train']['complexity'])))

    def tokenize(example):
        inputs = tokenizer(example['src'], truncation=True, max_length=1024)
        label = labels.str2int(example['complexity'])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize, batched=True, remove_columns=train_test_validation['train'].column_names, )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
    training_args = TrainingArguments(
        output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy='epoch', save_strategy='epoch', logging_strategy='epoch', per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model='accuracy', run_name='complexity-java', report_to='wandb', )
    trainer = Trainer(
        model=model, args=training_args, train_dataset=tokenized_datasets['train'], eval_dataset=tokenized_datasets['valid'], tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, )
    print('Training...')
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
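# Example invocation (added; assumes GPU access and a configured wandb login):
#
#     python train_complexity_predictor.py \
#         --model_ckpt microsoft/unixcoder-base-nine --num_epochs 5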
| 592 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = 'vivit'

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu_fast',
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
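# Minimal usage sketch (added, not in the original file): instantiate the
# default ViViT configuration and inspect a couple of fields.
if __name__ == "__main__":
    config = VivitConfig()
    print(config.model_type, config.hidden_size, config.num_frames)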
| 706 |
'''simple docstring'''
# Imports
import numpy as np
class IndexCalculation:
    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
def UpperCAmelCase_ ( self, A="", A=None, A=None, A=None, A=None, A=None ):
"""simple docstring"""
self.set_matricies(red=A, green=A, blue=A, red_edge=A, nir=A )
lowerCamelCase : Optional[int] = {
'ARVI2': self.arvaa,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!' )
return False
    def arv12(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, a=0.08, b=1.22, x=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
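# Minimal usage sketch (added, not in the original file): compute NDVI for a
# tiny synthetic 2x2 scene.
if __name__ == "__main__":
    red = np.array([[0.3, 0.4], [0.2, 0.5]])
    nir = np.array([[0.8, 0.7], [0.9, 0.6]])
    cl = IndexCalculation(red=red, nir=nir)
    print(cl.calculation('NDVI', red=red, nir=nir))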
| 449 | 0 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': DebertaModel,
            'fill-mask': DebertaForMaskedLM,
            'question-answering': DebertaForQuestionAnswering,
            'text-classification': DebertaForSequenceClassification,
            'token-classification': DebertaForTokenClassification,
            'zero-shot': DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 155 |
"""simple docstring"""
def logical_left_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )
    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
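# Worked examples (added for clarity):
#   logical_left_shift(1, 1)      -> '0b10'
#   logical_right_shift(8, 3)     -> '0b1'
#   arithmetic_right_shift(-8, 1) -> '0b11100'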
if __name__ == "__main__":
import doctest
doctest.testmod()
| 155 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""junnyu/roformer_chinese_small""": 1536,
"""junnyu/roformer_chinese_base""": 1536,
"""junnyu/roformer_chinese_char_small""": 512,
"""junnyu/roformer_chinese_char_base""": 512,
"""junnyu/roformer_small_discriminator""": 128,
"""junnyu/roformer_small_generator""": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get('lowercase', do_lower_case) != do_lower_case
            or pre_tok_state.get('strip_accents', strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop('type'))
            pre_tok_state['lowercase'] = do_lower_case
            pre_tok_state['strip_accents'] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
        self.do_lower_case = do_lower_case
    def __getstate__(self):
        state = self.__dict__.copy()
        state['_tokenizer'].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__['_tokenizer'].get_vocab()
        self.__dict__['_tokenizer'].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
| 704 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)
    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices
    @classmethod
    def load(cls, f):
        d = cls()
        d.add_from_file(f)
        return d
    def add_symbol(self, word, n=1, overwrite=False):
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
try:
with open(lowerCAmelCase_ , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(lowerCAmelCase_ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(lowerCAmelCase_ ) )
return
_snake_case = f.readlines()
_snake_case = self._load_meta(lowerCAmelCase_ )
for line in lines[indices_start_line:]:
try:
_snake_case , _snake_case = line.rstrip().rsplit(' ' , 1 )
if field == "#fairseq:overwrite":
_snake_case = True
_snake_case , _snake_case = line.rsplit(' ' , 1 )
else:
_snake_case = False
_snake_case = int(lowerCAmelCase_ )
_snake_case = line
if word in self and not overwrite:
raise RuntimeError(
'Duplicate word found when loading Dictionary: \'{}\'. '
'Duplicate words can overwrite earlier ones by adding the '
'#fairseq:overwrite flag at the end of the corresponding row '
'in the dictionary file. If using the Camembert model, please '
'download an updated copy of the model file.'.format(lowerCAmelCase_ ) )
self.add_symbol(lowerCAmelCase_ , n=lowerCAmelCase_ , overwrite=lowerCAmelCase_ )
except ValueError:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
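# A minimal usage sketch for the Dictionary above (the dict.txt contents are
# made up for illustration). Each line is "<token> <count>", optionally
# followed by the "#fairseq:overwrite" flag handled in add_from_file:
#
#     le@@ 5
#     tt@@ 6
#     er 7
#
#     d = Dictionary.load("dict.txt")  # <s>/<pad>/</s>/<unk> take ids 0..3
#     idx = d.add_symbol("er", n=2)    # existing symbol: id reused, count += 2
#     assert d[idx] == "er" and "er" in d and len(d) == 7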
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # good hparam defaults to start with
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    # rename fairseq keys to the HF layout: the output projection loses the
    # "decoder." prefix, everything else maps "decoder" -> "biogpt"
    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict[layer_name.replace("decoder.", "")] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--biogpt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
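# A hedged usage sketch (script name and paths are placeholders): the
# checkpoint directory is expected to contain checkpoint.pt, dict.txt and
# bpecodes, matching the checks performed in
# convert_biogpt_checkpoint_to_pytorch above.
#
#     python convert_biogpt_checkpoint.py \
#         --biogpt_checkpoint_path /path/to/fairseq_biogpt_dir \
#         --pytorch_dump_folder_path /path/to/hf_output_dir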
| 541 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
class __A :
def __init__( self : Optional[Any] , __snake_case : int ) -> None:
__magic_name__: Optional[int] = size
# approximate the overall size of segment tree with given value
__magic_name__: str = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
__magic_name__: Dict = [0 for i in range(0 , 4 * size )]
__magic_name__: Optional[int] = [0 for i in range(0 , 4 * size )] # flag for lazy update
def lowerCamelCase__ ( self : str , __snake_case : int ) -> int:
return idx * 2
def lowerCamelCase__ ( self : Dict , __snake_case : int ) -> int:
return idx * 2 + 1
def lowerCamelCase__ ( self : Tuple , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : list[int] ) -> None:
if left_element == right_element:
__magic_name__: List[Any] = a[left_element - 1]
else:
__magic_name__: Any = (left_element + right_element) // 2
self.build(self.left(__snake_case ) , __snake_case , __snake_case , __snake_case )
self.build(self.right(__snake_case ) , mid + 1 , __snake_case , __snake_case )
__magic_name__: List[str] = max(
self.segment_tree[self.left(__snake_case )] , self.segment_tree[self.right(__snake_case )] )
def lowerCamelCase__ ( self : Tuple , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int ) -> bool:
if self.flag[idx] is True:
__magic_name__: int = self.lazy[idx]
__magic_name__: Any = False
if left_element != right_element:
__magic_name__: Optional[Any] = self.lazy[idx]
__magic_name__: Dict = self.lazy[idx]
__magic_name__: Dict = True
__magic_name__: Optional[int] = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
__magic_name__: Optional[Any] = val
if left_element != right_element:
__magic_name__: Dict = val
__magic_name__: Dict = val
__magic_name__: Union[str, Any] = True
__magic_name__: List[Any] = True
return True
__magic_name__: int = (left_element + right_element) // 2
self.update(self.left(__snake_case ) , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
self.update(self.right(__snake_case ) , mid + 1 , __snake_case , __snake_case , __snake_case , __snake_case )
__magic_name__: List[str] = max(
self.segment_tree[self.left(__snake_case )] , self.segment_tree[self.right(__snake_case )] )
return True
def lowerCamelCase__ ( self : Optional[int] , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int ) -> int | float:
if self.flag[idx] is True:
__magic_name__: List[str] = self.lazy[idx]
__magic_name__: str = False
if left_element != right_element:
__magic_name__: int = self.lazy[idx]
__magic_name__: Dict = self.lazy[idx]
__magic_name__: List[str] = True
__magic_name__: List[Any] = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
__magic_name__: Optional[Any] = (left_element + right_element) // 2
__magic_name__: Union[str, Any] = self.query(self.left(__snake_case ) , __snake_case , __snake_case , __snake_case , __snake_case )
__magic_name__: List[str] = self.query(self.right(__snake_case ) , mid + 1 , __snake_case , __snake_case , __snake_case )
return max(__snake_case , __snake_case )
def __str__( self : Dict ) -> str:
return str([self.query(1 , 1 , self.size , __snake_case , __snake_case ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
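# A small illustrative check of the lazy-propagation semantics above (values
# hand-computed from the inputs, so treat this as an informal sketch rather
# than a test): a range assignment only touches O(log n) nodes eagerly, and
# children are patched on the next query that visits them.
#
#     segt2 = SegmentTree(3)
#     segt2.build(1, 1, 3, [5, 1, 9])
#     assert segt2.query(1, 1, 3, 1, 3) == 9   # max of [5, 1, 9]
#     segt2.update(1, 1, 3, 1, 2, 7)           # assign 7 on positions 1..2
#     assert segt2.query(1, 1, 3, 1, 2) == 7   # lazy value pushed down here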
| 96 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
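# A minimal instantiation sketch (hypothetical values, not from the original
# file): a plain language model only needs vocabulary-style kwargs, while
# folding models additionally nest EsmFoldConfig -> TrunkConfig ->
# StructureModuleConfig, with plain dicts accepted and upgraded in
# __post_init__.
#
#     config = EsmConfig(vocab_size=33, mask_token_id=32, pad_token_id=1)
#     fold_config = EsmConfig(
#         vocab_size=33,
#         mask_token_id=32,
#         pad_token_id=1,
#         is_folding_model=True,
#         esmfold_config={"trunk": {"num_blocks": 4}},
#     )
#     assert fold_config.esmfold_config.trunk.num_blocks == 4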
| 215 | 0 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name, module, processors):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
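# An illustrative sketch (plain torch, no assumptions beyond the lines above)
# of the additive causal mask built in __init__: torch.full(..., -10000.0)
# followed by triu_(1) keeps 0.0 on and below the diagonal and a large
# negative bias strictly above it, which softmax turns into ~zero attention.
#
#     import torch
#     mask = torch.full([4, 4], -10000.0)
#     mask.triu_(1)
#     # row i may attend to columns <= i:
#     # tensor([[     0., -10000., -10000., -10000.],
#     #         [     0.,      0., -10000., -10000.],
#     #         [     0.,      0.,      0., -10000.],
#     #         [     0.,      0.,      0.,      0.]])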
| 341 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict


@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)

    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=10224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")

    args = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
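# A hedged usage sketch (script name and all paths/values are placeholders):
# dict.txt must sit next to the fairseq checkpoint, since its parent
# directory is passed as the fairseq "data" override above.
#
#     python convert_wav2vec2_seq2seq_checkpoint.py \
#         --checkpoint_path /path/to/checkpoint_best.pt \
#         --pytorch_dump_folder_path /path/to/hf_output_dir \
#         --dict_path /path/to/dict.txt \
#         --vocab_size 10224 \
#         --num_decoder_layers 7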
| 341 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Prepares a list of PIL images, numpy arrays (numpify=True) or PyTorch tensors (torchify=True)."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
def UpperCamelCase_ ( self : Dict ):
pass
def UpperCamelCase_ ( self : List[Any] ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A = self.image_processor_tester.prepare_inputs(equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A ,Image.Image )
# Test not batched input
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
__A = image_processing(A ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def UpperCamelCase_ ( self : Any ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A = self.image_processor_tester.prepare_inputs(equal_resolution=A ,numpify=A )
for image in image_inputs:
self.assertIsInstance(A ,np.ndarray )
# Test not batched input
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
__A = image_processing(A ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def UpperCamelCase_ ( self : Any ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A = self.image_processor_tester.prepare_inputs(equal_resolution=A ,torchify=A )
for image in image_inputs:
self.assertIsInstance(A ,torch.Tensor )
# Test not batched input
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
__A = image_processing(A ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
def UpperCamelCase_ ( self : int ):
pass
def UpperCamelCase_ ( self : Union[str, Any] ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A = self.image_processor_tester.prepare_inputs(equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A ,Image.Image )
# Test not batched input
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
__A = image_processing(A ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
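# A short note on the size semantics exercised above, with hypothetical
# values: passing `size=42` as a kwarg is normalized to {"shortest_edge": 42}
# (aspect-preserving resize), while an explicit dict such as
# {"height": 224, "width": 224} forces an exact output resolution; the crop
# dicts always use the height/width form.
#
#     processor = ChineseCLIPImageProcessor(size=42, crop_size=84)
#     assert processor.size == {"shortest_edge": 42}
#     assert processor.crop_size == {"height": 84, "width": 84}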
| 55 |
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")

    if depth == height:
        return scores[node_index]

    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
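# A hand-worked trace of the demo above (8 leaves, height log2(8) = 3,
# maximizer to move at the root):
#   depth 2 (max): max(90,23)=90   max(6,33)=33   max(21,65)=65   max(123,34423)=34423
#   depth 1 (min): min(90,33)=33   min(65,34423)=65
#   depth 0 (max): max(33,65)=65   ->  prints "Optimal value : 65"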
| 368 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
def _SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(encoding.boxes , _SCREAMING_SNAKE_CASE )
# Test batched
UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = LayoutLMvaImageProcessor()
from datasets import load_dataset
UpperCamelCase = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
UpperCamelCase = Image.open(ds[0]['file'] ).convert('RGB' )
UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
UpperCamelCase = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
UpperCamelCase = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _SCREAMING_SNAKE_CASE )
self.assertListEqual(encoding.boxes , _SCREAMING_SNAKE_CASE )
# with apply_OCR = False
UpperCamelCase = LayoutLMvaImageProcessor(apply_ocr=_SCREAMING_SNAKE_CASE )
UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
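# A brief sketch of the two modes exercised above (the `image` variable is a
# placeholder for any PIL image): with apply_ocr=True the processor runs
# Tesseract and returns, per image, the recognized words plus boxes
# normalized to a 0-1000 coordinate grid; with apply_ocr=False it only
# resizes/rescales pixels.
#
#     processor = LayoutLMvaImageProcessor()      # apply_ocr defaults to True
#     enc = processor(image, return_tensors="pt")
#     words, boxes = enc.words[0], enc.boxes[0]   # e.g. "11:14", [141, 57, 214, 69]
#     assert all(0 <= c <= 1000 for box in boxes for c in box)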
| 716 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
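# A small illustration of what the lazy structure above buys (module and
# class names are real; the timing claim is qualitative): heavy submodules
# are only imported when an attribute is first resolved, so importing the
# package stays cheap until a model class is actually touched.
#
#     from transformers.models import mvp
#     config = mvp.MvpConfig()                         # resolves configuration_mvp only
#     model = mvp.MvpForConditionalGeneration(config)  # now modeling_mvp loads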
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
A_ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = XLNetTokenizer
lowercase__ = XLNetTokenizerFast
lowercase__ = True
lowercase__ = True
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_snake_case : Dict = XLNetTokenizer(a_, keep_accents=a_ )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : str = """<s>"""
_snake_case : List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ), a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ), a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], """<unk>""" )
self.assertEqual(vocab_keys[1], """<s>""" )
self.assertEqual(vocab_keys[-1], """<eod>""" )
self.assertEqual(len(a_ ), 1_006 )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 1_000 )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = XLNetTokenizer(a_, keep_accents=a_ )
_snake_case : Optional[int] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(a_, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), [285, 46, 10, 170, 382] )
_snake_case : int = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
a_, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
], )
_snake_case : List[Any] = tokenizer.convert_tokens_to_ids(a_ )
self.assertListEqual(a_, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
_snake_case : List[Any] = tokenizer.convert_ids_to_tokens(a_ )
self.assertListEqual(
a_, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
], )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Dict = XLNetTokenizer(a_, do_lower_case=a_ )
_snake_case : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
a_, [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
], )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ), ["""▁he""", """ll""", """o"""] )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : str = XLNetTokenizer(a_, do_lower_case=a_ )
_snake_case : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
a_, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
], )
@slow
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Dict = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" )
_snake_case : str = tokenizer.encode("""sequence builders""", add_special_tokens=a_ )
_snake_case : Union[str, Any] = tokenizer.encode("""multi-sequence build""", add_special_tokens=a_ )
_snake_case : int = tokenizer.build_inputs_with_special_tokens(a_ )
_snake_case : str = tokenizer.build_inputs_with_special_tokens(a_, a_ )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Optional[Any] = {"""input_ids""": [[17, 21_442, 270, 17, 10, 14_645, 318, 34, 17, 4_546, 3_145, 787, 13, 7_752, 22_018, 23, 21, 17, 4_546, 3_145, 787, 13, 3_352, 14_431, 13, 5_500, 11, 1_176, 580, 13, 16_819, 4_797, 23, 17, 10, 17_135, 658, 19, 457, 7_932, 13, 184, 19, 3_154, 17_135, 6_468, 19, 1_404, 12_269, 19, 4_229, 5_356, 16_264, 46, 19, 17, 20_545, 10_395, 9, 9, 9, 11, 28, 6_421, 9_531, 20_729, 17, 10, 353, 17_022, 11, 21, 6_421, 9_531, 16_949, 17, 10, 11_509, 753, 11, 33, 95, 2_421, 7_385, 956, 14_431, 2_626, 25, 842, 7_385, 4_836, 21, 1_429, 2_272, 9_855, 3_120, 161, 24_738, 19, 13_203, 658, 218, 787, 21, 430, 18_482, 847, 2_637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22_178, 27, 1_064, 22, 956, 13, 11_101, 1_429, 5_854, 24_313, 18_953, 40, 422, 24_366, 68, 1_758, 37, 10_483, 14_257, 31, 207, 263, 21, 203, 3_773, 25, 71, 9_735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2_049, 3_442, 17, 13_894, 3_380, 23, 95, 18, 17_634, 2_288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        expected_encoding = _snake_case  # the fixture dict defined just above

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="xlnet-base-cased",
            revision="c841166438c31ec7ca9a106dee7bb312b73ae511",
        )
"""simple docstring"""
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class lowercase:
'''simple docstring'''
def __init__( self: str, a_: Dict, a_: List[str], a_: bool = True, a_: bool = False ):
'''simple docstring'''
_snake_case : Tuple = scheduler
_snake_case : Optional[Any] = optimizers if isinstance(a_, (list, tuple) ) else [optimizers]
_snake_case : str = split_batches
_snake_case : List[str] = step_with_optimizer
_snake_case : Tuple = GradientState()
def UpperCamelCase_ ( self: Optional[int], *a_: Optional[Any], **a_: Optional[int] ):
'''simple docstring'''
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*a_, **a_ )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*a_, **a_ )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
_snake_case : Tuple = AcceleratorState().num_processes
for _ in range(a_ ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler, """total_steps""" ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*a_, **a_ )
else:
self.scheduler.step(*a_, **a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
return self.scheduler.get_last_lr()
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
return self.scheduler.state_dict()
def UpperCamelCase_ ( self: List[Any], a_: Union[str, Any] ):
'''simple docstring'''
self.scheduler.load_state_dict(a_ )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
return self.scheduler.get_lr()
def UpperCamelCase_ ( self: Any, *a_: Optional[Any], **a_: Dict ):
'''simple docstring'''
return self.scheduler.print_lr(*a_, **a_ )
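# Minimal usage sketch (illustrative; `model` and the optimizer/scheduler choices
# below are stand-ins, not part of this module). `Accelerator.prepare` normally
# builds this wrapper, but it can also be constructed by hand:
#
#   from torch.optim import AdamW
#   from torch.optim.lr_scheduler import StepLR
#
#   optimizer = AdamW(model.parameters(), lr=1e-3)
#   scheduler = AcceleratedScheduler(StepLR(optimizer, step_size=10), optimizer)
#   scheduler.step()  # only advances when the wrapped optimizer really stepped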
from __future__ import annotations

import unittest

from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel


@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    # Normalize the accepted input shapes (single image, list of frames, or
    # list of videos) into a list of videos, each a list of frames.
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")


class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
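# Usage sketch (illustrative; frame sizes and count are arbitrary examples):
# preprocess a dummy 8-frame video into a (1, 8, 3, 224, 224) pixel-value batch.
#
#   frames = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
#   processor = VideoMAEImageProcessor()
#   batch = processor(frames, return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 8, 3, 224, 224)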
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Flip two qubits with X gates, measure them, and return the counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)

    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
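# Because both qubits are deterministically flipped to |1> by the X gates, a
# noiseless simulator should report the single histogram entry {'11': 1000}.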
def bfs(graph, s, t, parent):
    """Return True if there is an augmenting path from source `s` to sink `t`,
    recording the path via the `parent` array."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink):
    """Compute the maximum flow from `source` to `sink` (BFS-based, i.e. Edmonds-Karp)."""
    parent = [-1] * len(graph)
    max_flow = 0

    while bfs(graph, source, sink, parent):
        # Find the minimum residual capacity along the augmenting path
        path_flow = float("Inf")
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow

        # Update residual capacities of the edges (and reverse edges) on the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
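# For this classic CLRS example network, the maximum flow from node 0 to node 5
# is 23, so the line above should print 23.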
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__( __magic_name__ , unittest.TestCase ):
lowerCAmelCase = KandinskyImgaImgPipeline
lowerCAmelCase = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''']
lowerCAmelCase = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
lowerCAmelCase = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
lowerCAmelCase = False
@property
def _a ( self : int ) -> Any:
"""simple docstring"""
return 32
@property
def _a ( self : str ) -> Tuple:
"""simple docstring"""
return 32
@property
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return self.time_input_dim
@property
def _a ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def _a ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return 1_00
@property
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def _a ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
__SCREAMING_SNAKE_CASE = MultilingualCLIP(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = text_encoder.eval()
return text_encoder
@property
def _a ( self : Tuple ) -> Dict:
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
__SCREAMING_SNAKE_CASE = UNetaDConditionModel(**__SCREAMING_SNAKE_CASE )
return model
@property
def _a ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _a ( self : List[Any] ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = VQModel(**self.dummy_movq_kwargs )
return model
def _a ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.dummy_text_encoder
__SCREAMING_SNAKE_CASE = self.dummy_tokenizer
__SCREAMING_SNAKE_CASE = self.dummy_unet
__SCREAMING_SNAKE_CASE = self.dummy_movq
__SCREAMING_SNAKE_CASE = {
'''num_train_timesteps''': 10_00,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_00_85,
'''beta_end''': 0.0_12,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
__SCREAMING_SNAKE_CASE = DDIMScheduler(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int=0 ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__SCREAMING_SNAKE_CASE )
# create init_image
__SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 64, 64) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(__SCREAMING_SNAKE_CASE ) ).convert('''RGB''' ).resize((2_56, 2_56) )
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
__SCREAMING_SNAKE_CASE = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def _a ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = '''cpu'''
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = pipe(
**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) , return_dict=__SCREAMING_SNAKE_CASE , )[0]
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE = np.array(
[0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class A__( unittest.TestCase ):
def _a ( self : List[str] ) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : str ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
__SCREAMING_SNAKE_CASE = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
__SCREAMING_SNAKE_CASE = '''A red cartoon frog, 4k'''
__SCREAMING_SNAKE_CASE = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
__SCREAMING_SNAKE_CASE = pipeline.to(__SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = torch.Generator(device='''cpu''' ).manual_seed(0 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = pipe_prior(
__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
__SCREAMING_SNAKE_CASE = pipeline(
__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , image_embeds=__SCREAMING_SNAKE_CASE , negative_image_embeds=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='''np''' , )
__SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase__ =logging.get_logger(__name__)
@add_end_docstrings(__magic_name__ )
class A__( __magic_name__ ):
def __init__( self : Optional[Any] , **__SCREAMING_SNAKE_CASE : str ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str], "Image", List["Image"]] , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _a ( self : int , **__SCREAMING_SNAKE_CASE : int ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {}
if "candidate_labels" in kwargs:
__SCREAMING_SNAKE_CASE = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
__SCREAMING_SNAKE_CASE = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def _a ( self : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Optional[int]="This is a photo of {}." ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = load_image(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.image_processor(images=[image] , return_tensors=self.framework )
__SCREAMING_SNAKE_CASE = candidate_labels
__SCREAMING_SNAKE_CASE = [hypothesis_template.format(__SCREAMING_SNAKE_CASE ) for x in candidate_labels]
__SCREAMING_SNAKE_CASE = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework , padding=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = [text_inputs]
return inputs
def _a ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = model_inputs.pop('''candidate_labels''' )
__SCREAMING_SNAKE_CASE = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = text_inputs[0]
else:
# Batching case.
__SCREAMING_SNAKE_CASE = text_inputs[0][0]
__SCREAMING_SNAKE_CASE = self.model(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def _a ( self : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = model_outputs.pop('''candidate_labels''' )
__SCREAMING_SNAKE_CASE = model_outputs['''logits'''][0]
if self.framework == "pt":
__SCREAMING_SNAKE_CASE = logits.softmax(dim=-1 ).squeeze(-1 )
__SCREAMING_SNAKE_CASE = probs.tolist()
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = [scores]
elif self.framework == "tf":
__SCREAMING_SNAKE_CASE = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 )
__SCREAMING_SNAKE_CASE = probs.numpy().tolist()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
__SCREAMING_SNAKE_CASE = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , key=lambda __SCREAMING_SNAKE_CASE : -x[0] )
]
return result
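# Usage sketch (illustrative; the checkpoint, file name and scores below are
# examples, not guarantees from this module):
#
#   from transformers import pipeline
#
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["a cat", "a dog"])
#   # -> e.g. [{"score": 0.98, "label": "a cat"}, {"score": 0.02, "label": "a dog"}]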
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# for checking whether the graph has an Euler path or circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
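# Expected behavior of main(), from the degree counts of each graph: g1 has an
# Euler path (nodes 1 and 5 have odd degree), g2 and g4 have Euler cycles, g3 is
# not Eulerian (four odd-degree nodes), and the edgeless g5 trivially reports a
# cycle whose traversal is just [1].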
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints

import yaml


DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)


class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)

    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)

    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)

    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ):
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)

    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False):
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False):
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False):
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
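# Usage sketch (illustrative; `TrainingArgs` is a stand-in dataclass, not part
# of this module):
#
#   @dataclasses.dataclass
#   class TrainingArgs:
#       learning_rate: float = 5e-5
#       fp16: bool = False
#
#   parser = HfArgumentParser(TrainingArgs)
#   (training_args,) = parser.parse_args_into_dataclasses(["--learning_rate", "1e-4", "--fp16"])
#   assert training_args.learning_rate == 1e-4 and training_args.fp16 is True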
"""simple docstring"""
def add(first: int, second: int) -> int:
    """Add two non-negative integers using only bitwise operations.

    >>> add(3, 5)
    8
    >>> add(13, 5)
    18
    """
    while second != 0:
        # carry holds the common set bits of first and second
        carry = first & second
        # sum of bits where at least one operand has a 1
        first ^= second
        # shift the carry so it is added in on the next iteration
        second = carry << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
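A worked trace of the carry loop, as a sketch:

# add(5, 3): first=0b101, second=0b011
#   iteration 1: carry=0b001, first=0b110, second=0b010
#   iteration 2: carry=0b010, first=0b100, second=0b100
#   iteration 3: carry=0b100, first=0b000, second=0b1000
#   iteration 4: carry=0b000, first=0b1000, second=0b0 -> loop ends, returns 8
assert add(5, 3) == 8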
| 150 |
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Returns sinusoidal timestep embeddings of shape (len(timesteps), embedding_dim)."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"""Embedding dimension {embedding_dim} should be even"""
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    """Time step embedding module: two dense layers with a silu in between."""

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    """Wrapper module for the sinusoidal time step embeddings."""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift)
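A quick shape check for the embedding helper, as a sketch:

emb = get_sinusoidal_embeddings(jnp.arange(4), embedding_dim=8)
print(emb.shape)  # (4, 8): one 8-dimensional embedding per timestep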
| 150 | 1 |
'''simple docstring'''
def check_palindrome_number(num: int) -> bool:
    """Return True if num reads the same when its digits are reversed (e.g. 121)."""
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
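    # a few sanity checks, as a sketch
    assert check_palindrome_number(121)
    assert not check_palindrome_number(-121)
    assert not check_palindrome_number(10)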
| 688 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:  # manhattan distance
            return abs(dx) + abs(dy)
        else:  # euclidean distance
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_x,  # pass the goal as (x, y) so the heuristic stays consistent
                    self.target.pos_y,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # each search now aims for the frontier node of the other one
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f'AStar execution time = {end_time:f} seconds')

    bd_start_time = time.time()
    bd_astar = BidirectionalAStar(init, goal)
    bd_path = bd_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f'BidirectionalAStar execution time = {bd_end_time:f} seconds')
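Both searchers read the module-level grid and delta inside get_successors, so trying them on a different map means rebinding the global grid first; a minimal sketch:

    # grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]    # hypothetical 3x3 map with one obstacle
    # print(AStar((0, 0), (2, 2)).search())       # one shortest path around the obstacle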
| 688 | 1 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png''' )
        mask_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )

        model_id = '''xvjiarui/stable-diffusion-2-inpainting'''
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = '''Face of a yellow cat, high resolution, sitting on a park bench'''

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)

        # shard inputs and rng across devices
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True)

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
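The replicate/shard calls above implement the usual Flax data-parallel recipe: parameters are copied to every device while inputs gain a leading device axis. A standalone sketch:

import jax
import numpy as np
from flax.training.common_utils import shard

batch = np.zeros((jax.device_count() * 2, 4))  # global batch, divisible by the device count
sharded = shard(batch)                         # shape becomes (n_devices, 2, 4)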
| 711 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'prophetnet.tokenizer'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'microsoft/xprophetnet-large-wiki100-cased': (
            'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    'microsoft/xprophetnet-large-wiki100-cased': {'do_lower_case': False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'microsoft/xprophetnet-large-wiki100-cased': 512,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into an ordered token -> index dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, '''r''', encoding='''utf-8''') as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip('''\n''')
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                '''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'''
                ''' pip install sentencepiece''' )
            raise

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {'''[PAD]''': 0, '''[CLS]''': 1, '''[SEP]''': 2, '''[UNK]''': 3, '''[MASK]''': 4}

        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i

        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                '''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'''
                ''' pip install sentencepiece''' )
            raise

        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = ''''''.join(tokens).replace(SPIECE_UNDERLINE, ''' ''').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
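A sketch of the id arithmetic above: sentencepiece ids are shifted by fairseq_offset so that ids 0-11 stay reserved for the five special tokens and the ten [unusedN] slots, while spm id 0 falls back to the unknown token. The helper below is illustrative, not part of the source:

def to_fairseq_id(spm_id: int, fairseq_offset: int = 12, unk_token_id: int = 3) -> int:
    # spm id 0 is sentencepiece's own <unk>, so map it to the fairseq [UNK] id
    return spm_id + fairseq_offset if spm_id else unk_token_id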
| 346 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self) -> None:
        super().setUp()
        vocab_tokens = [
            """[UNK]""",
            """[CLS]""",
            """[SEP]""",
            """want""",
            """##want""",
            """##ed""",
            """wa""",
            """un""",
            """runn""",
            """##ing""",
            """,""",
            """low""",
            """lowest""",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
        with open(self.vocab_file, """w""", encoding="""utf-8""") as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = """UNwant\u00E9d,running"""
        output_text = """unwanted, running"""
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("""UNwant\u00E9d,running""")
        self.assertListEqual(tokens, ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        pass
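The expected pieces in test_full_tokenizer come from greedy longest-match-first WordPiece. A sketch using the BERT helper (the import path may differ across transformers versions):

from transformers.models.bert.tokenization_bert import WordpieceTokenizer

wp = WordpieceTokenizer(vocab={"un": 0, "##want": 1, "##ed": 2, "[UNK]": 3}, unk_token="[UNK]")
print(wp.tokenize("unwanted"))  # ['un', '##want', '##ed']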
| 532 |
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["""en-ru""", 26.0],
            ["""ru-en""", 22.0],
            ["""en-de""", 22.0],
            ["""de-en""", 29.0],
        ] )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f'facebook/wmt19-{pair}'
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["""src"""]
        tgt_sentences = bleu_data[pair]["""tgt"""]

        batch = tokenizer(src_sentences, return_tensors="""pt""", truncation=True, padding="""longest""").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8, )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["""bleu"""], min_bleu_score)
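calculate_bleu is imported from the local examples utils module; a minimal stand-in, assuming a sacrebleu-based implementation like the one those utils use:

import sacrebleu

def calculate_bleu(output_lns, refs_lns):
    # corpus-level BLEU over the decoded sentences against a single reference set
    return {"bleu": round(sacrebleu.corpus_bleu(output_lns, [refs_lns]).score, 4)}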
| 532 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""albert-base-v2""")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("""albert-base-v2""")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 480 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
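A usage sketch of the lazy pattern above: nothing under modeling_luke is imported until an attribute is actually touched, with resolution handled by _LazyModule's __getattr__:

from transformers.models import luke

model_cls = luke.LukeModel  # torch-dependent module only loads on this first access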
| 480 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/mbart-large-en-ro": 1_024,
"facebook/mbart-large-cc25": 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs, ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, **kwargs, )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens])

        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else 'en_XX'
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts, src_lang: str = "en_XX", tgt_texts=None, tgt_lang: str = "ro_RO", **kwargs, ):
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source language setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')

        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
 | 329 |
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Walk inwards from both ends of the list until key is found.

    Returns the index of key, or -1 if it is not present. Works on
    unsorted lists, since every element is eventually inspected.
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
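    # a couple of checks for the two-ended search, as a sketch
    assert search([1, 3, 5, 7, 9], 7) == 3
    assert search([9, 2, 5], 4) == -1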
| 181 | 0 |
'''simple docstring'''
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = """base_with_context"""
def load_notes_encoder(weights, model):
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["""token_embedder"""]["""embedding"""]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["""Embed_0"""]["""embedding"""]), requires_grad=False)
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""]))

        attention_weights = ly_weight["""attention"""]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""]))

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""]))
    return model
def load_continuous_encoder(weights, model):
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["""input_proj"""]["""kernel"""].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["""Embed_0"""]["""embedding"""]), requires_grad=False)
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["""attention"""]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""]))

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""]))
    return model
def load_decoder(weights, model):
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense0"""]["""kernel"""].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense1"""]["""kernel"""].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["""Embed_0"""]["""embedding"""]), requires_grad=False)
    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["""continuous_inputs_projection"""]["""kernel"""].T))
    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["""pre_self_attention_layer_norm"""]["""scale"""]))
        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["""FiLMLayer_0"""]["""DenseGeneral_0"""]["""kernel"""].T))

        attention_weights = ly_weight["""self_attention"""]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T))

        attention_weights = ly_weight["""MultiHeadDotProductAttention_0"""]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["""pre_cross_attention_layer_norm"""]["""scale"""]))

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["""FiLMLayer_1"""]["""DenseGeneral_0"""]["""kernel"""].T))

        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["""decoder_norm"""]["""scale"""]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["""spec_out_dense"""]["""kernel"""].T))
    return model
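The recurring .T in the loaders above reflects a layout mismatch: t5x stores dense kernels as (in_features, out_features), while torch.nn.Linear keeps (out_features, in_features). A standalone sketch:

import numpy as np
import torch
import torch.nn as nn

kernel = np.random.randn(16, 32).astype("float32")       # t5x-style kernel: (in, out)
linear = nn.Linear(16, 32, bias=False)
linear.weight = nn.Parameter(torch.FloatTensor(kernel.T))  # torch weight: (out, in)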
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)
    gin_overrides = [
        """from __gin__ import dynamic_registration""",
        """from music_spectrogram_diffusion.models.diffusion import diffusion_utils""",
        """diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0""",
        """diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()""",
    ]
    gin_file = os.path.join(args.checkpoint_path, """..""", """config.gin""")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)
    scheduler = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""", variance_type="""fixed_large""")
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["""inputs"""], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="""gated-gelu""", )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["""targets_context"""], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="""gated-gelu""", )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["""targets_context"""], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, )
    notes_encoder = load_notes_encoder(t5_checkpoint["""target"""]["""token_encoder"""], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["""target"""]["""continuous_encoder"""], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["""target"""]["""decoder"""], decoder)
    melgan = OnnxRuntimeModel.from_pretrained("""kashif/soundstream_mel_decoder""")
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
    parser.add_argument(
        '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
    )
    parser.add_argument(
        '--checkpoint_path',
        default=f"{MODEL}/checkpoint_500000",
        type=str,
        required=False,
        help='Path to the original jax model checkpoint.',
    )
    args = parser.parse_args()
main(args)
| 701 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_convbert_fast'] = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convbert'] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convbert'] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=4_00,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 2_55,
        do_pad=True,
    ):
        size = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 13_33}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to the image processor,
        assuming do_resize is set to True with a scalar shortest_edge size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["""shortest_edge"""] * h / w)
                expected_width = self.size["""shortest_edge"""]
            elif w > h:
                expected_height = self.size["""shortest_edge"""]
                expected_width = int(self.size["""shortest_edge"""] * w / h)
            else:
                expected_height = self.size["""shortest_edge"""]
                expected_width = self.size["""shortest_edge"""]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, """image_mean"""))
        self.assertTrue(hasattr(image_processing, """image_std"""))
        self.assertTrue(hasattr(image_processing, """do_normalize"""))
        self.assertTrue(hasattr(image_processing, """do_resize"""))
        self.assertTrue(hasattr(image_processing, """size"""))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"""shortest_edge""": 18, """longest_edge""": 13_33})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {"""shortest_edge""": 42, """longest_edge""": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
        with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""", """r""") as f:
            target = json.loads(f.read())

        target = {"""image_id""": 39769, """annotations""": target}
        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("""microsoft/conditional-detr-resnet-50""")
        encoding = image_processing(images=image, annotations=target, return_tensors="""pt""")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["""pixel_values"""].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3], expected_slice, atol=1E-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0], expected_boxes_slice, atol=1E-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""], expected_size))
@slow
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""") as f:
lowercase__ : List[str] = json.loads(f.read())
lowercase__ : Optional[Any] = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
lowercase__ : int = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""")
# encode them
lowercase__ : str = ConditionalDetrImageProcessor(format="""coco_panoptic""")
lowercase__ : Any = image_processing(images=SCREAMING_SNAKE_CASE_ , annotations=SCREAMING_SNAKE_CASE_ , masks_path=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""")
# verify pixel values
lowercase__ : Dict = torch.Size([1, 3, 8_00, 10_66])
self.assertEqual(encoding["""pixel_values"""].shape , SCREAMING_SNAKE_CASE_)
lowercase__ : str = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1])
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4))
# verify area
lowercase__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , SCREAMING_SNAKE_CASE_))
# verify boxes
lowercase__ : str = torch.Size([6, 4])
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , SCREAMING_SNAKE_CASE_)
lowercase__ : int = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , SCREAMING_SNAKE_CASE_ , atol=1E-3))
# verify image_id
lowercase__ : List[str] = torch.tensor([3_97_69])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , SCREAMING_SNAKE_CASE_))
# verify is_crowd
lowercase__ : int = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , SCREAMING_SNAKE_CASE_))
# verify class_labels
lowercase__ : Union[str, Any] = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , SCREAMING_SNAKE_CASE_))
# verify masks
lowercase__ : Union[str, Any] = 82_28_73
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , SCREAMING_SNAKE_CASE_)
# verify orig_size
lowercase__ : List[str] = torch.tensor([4_80, 6_40])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , SCREAMING_SNAKE_CASE_))
# verify size
lowercase__ : Union[str, Any] = torch.tensor([8_00, 10_66])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , SCREAMING_SNAKE_CASE_))
| 12 |
lowerCamelCase__ : List[str] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment out the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCamelCase__ : List[Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCamelCase__ : int = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 12 | 1 |
'''simple docstring'''
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =''
for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
key_no_dups += ch
return key_no_dups
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =[chr(i + 6_5 ) for i in range(2_6 )]
# Remove duplicate characters from key
_lowerCAmelCase =remove_duplicates(key.upper() )
_lowerCAmelCase =len(a__ )
# First fill cipher with key characters
_lowerCAmelCase ={alphabet[i]: char for i, char in enumerate(a__ )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(a__ ) , 2_6 ):
_lowerCAmelCase =alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
_lowerCAmelCase =alphabet[i - offset]
_lowerCAmelCase =char
return cipher_alphabet
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
return "".join(cipher_map.get(a__ , a__ ) for ch in message.upper() )
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase ={v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(a__ , a__ ) for ch in message.upper() )
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =input('Enter message to encode or decode: ' ).strip()
_lowerCAmelCase =input('Enter keyword: ' ).strip()
_lowerCAmelCase =input('Encipher or decipher? E/D:' ).strip()[0].lower()
try:
_lowerCAmelCase ={'e': encipher, 'd': decipher}[option]
except KeyError:
raise KeyError('invalid input option' )
_lowerCAmelCase =create_cipher_map(a__ )
print(func(a__ , a__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
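# A minimal usage sketch for the keyword-cipher sample above. The names
# `create_cipher_map`, `encipher` and `decipher` are the ones the function
# bodies already reference; the obfuscated `def` lines all reuse a single
# placeholder, so the calls below assume the de-obfuscated source:
#
#   cipher_map = create_cipher_map("Goodbye!!")
#   encipher("Hello World!!", cipher_map)    # 'CYJJM VMQJB!!'
#   decipher("CYJJM VMQJB!!", cipher_map)    # 'HELLO WORLD!!'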
| 709 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 58 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase = {
'''configuration_swiftformer''': [
'''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SwiftFormerConfig''',
'''SwiftFormerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwiftFormerForImageClassification''',
'''SwiftFormerModel''',
'''SwiftFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 157 |
from statistics import mean, stdev
def _A (UpperCamelCase : list , UpperCamelCase : int = 3 ) ->list:
'''simple docstring'''
lowerCamelCase__ : Dict = min(UpperCamelCase )
lowerCamelCase__ : List[str] = max(UpperCamelCase )
# normalize data
return [round((x - x_min) / (x_max - x_min) , UpperCamelCase ) for x in data]
def _A (UpperCamelCase : list , UpperCamelCase : int = 3 ) ->list:
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = mean(UpperCamelCase )
lowerCamelCase__ : Tuple = stdev(UpperCamelCase )
# standardize data
return [round((x - mu) / (sigma) , UpperCamelCase ) for x in data]
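# A small worked example for the two helpers above, assuming they are given
# distinct names such as `normalization` and `standardization` (in this dump
# both `def` lines share the placeholder `_A`, so the second definition
# shadows the first):
#
#   normalization([2, 7, 10, 20, 30, 50])
#   # -> [0.0, 0.104, 0.167, 0.375, 0.583, 1.0]   (min-max scaled into [0, 1])
#   standardization([2, 7, 10, 20, 30, 50])
#   # -> z-scores with mean 0 and (sample) standard deviation 1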
| 157 | 1 |
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
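# How the one-liner above works: `quine % quine` substitutes the string's own
# repr() for the `%r` placeholder and collapses the doubled `%%` to a single
# `%`, so the printed text reproduces the source line (up to quoting style,
# since repr() emits single quotes where the source uses triple quotes).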
| 703 | '''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : int ) -> str:
"""simple docstring"""
a : list[list[str]] = [[] for _ in range(snake_case )]
a : Union[str, Any] = key - 1
if key <= 0:
raise ValueError('Height of grid can\'t be 0 or negative' )
if key == 1 or len(snake_case ) <= key:
return input_string
for position, character in enumerate(snake_case ):
a : List[str] = position % (lowest * 2) # puts it in bounds
a : Any = min(snake_case , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(snake_case )
a : Union[str, Any] = [''.join(snake_case ) for row in temp_grid]
a : List[Any] = ''.join(snake_case )
return output_string
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : int ) -> str:
"""simple docstring"""
a : str = []
a : List[str] = key - 1
if key <= 0:
raise ValueError('Height of grid can\'t be 0 or negative' )
if key == 1:
return input_string
a : list[list[str]] = [[] for _ in range(snake_case )] # generates template
for position in range(len(snake_case ) ):
a : Tuple = position % (lowest * 2) # puts it in bounds
a : List[str] = min(snake_case , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append('*' )
a : Union[str, Any] = 0
for row in temp_grid: # fills in the characters
a : List[Any] = input_string[counter : counter + len(snake_case )]
grid.append(list(snake_case ) )
counter += len(snake_case )
a : Optional[Any] = '' # reads as zigzag
for position in range(len(snake_case ) ):
a : List[str] = position % (lowest * 2) # puts it in bounds
a : Union[str, Any] = min(snake_case , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> dict[int, str]:
"""simple docstring"""
a : Dict = {}
for key_guess in range(1 , len(snake_case ) ): # tries every key
a : Union[str, Any] = decrypt(snake_case , snake_case )
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
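# A round-trip sketch for the rail-fence cipher above, assuming the three
# functions are restored to the names the brute-force body implies
# (`encrypt` / `decrypt` / `bruteforce`; the last already calls `decrypt`
# by name):
#
#   encrypt("Hello World", 4)       # 'HWe olordll'
#   decrypt("HWe olordll", 4)       # 'Hello World'
#   bruteforce("HWe olordll")[4]    # 'Hello World'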
| 610 | 0 |
"""simple docstring"""
from typing import Any
def lowercase__ ( snake_case_ :list , snake_case_ :list , snake_case_ :dict , snake_case_ :dict , snake_case_ :dict , ):
_validation(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
# Creates data structures and fill initial step
__UpperCAmelCase = {}
__UpperCAmelCase = {}
for state in states_space:
__UpperCAmelCase = observations_space[0]
__UpperCAmelCase = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
__UpperCAmelCase = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(snake_case_ ) ):
__UpperCAmelCase = observations_space[o]
__UpperCAmelCase = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
__UpperCAmelCase = ''''''
__UpperCAmelCase = -1
for k_state in states_space:
__UpperCAmelCase = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
__UpperCAmelCase = probability
__UpperCAmelCase = k_state
# Update probabilities and pointers dicts
__UpperCAmelCase = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
__UpperCAmelCase = arg_max
# The final observation
__UpperCAmelCase = observations_space[len(snake_case_ ) - 1]
# argmax for given final observation
__UpperCAmelCase = ''''''
__UpperCAmelCase = -1
for k_state in states_space:
__UpperCAmelCase = probabilities[(k_state, final_observation)]
if probability > max_probability:
__UpperCAmelCase = probability
__UpperCAmelCase = k_state
__UpperCAmelCase = arg_max
# Process pointers backwards
__UpperCAmelCase = last_state
__UpperCAmelCase = []
for o in range(len(snake_case_ ) - 1 , -1 , -1 ):
result.append(snake_case_ )
__UpperCAmelCase = pointers[previous, observations_space[o]]
result.reverse()
return result
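# A minimal input sketch for the decoder above (commonly published as
# `viterbi`; every `def` in this sample was obfuscated to one placeholder).
# With the classic healthy/fever HMM, the most likely hidden path for the
# observations ("normal", "cold", "dizzy") is ["Healthy", "Healthy", "Fever"]:
#
#   observations = ["normal", "cold", "dizzy"]
#   states = ["Healthy", "Fever"]
#   start_p = {"Healthy": 0.6, "Fever": 0.4}
#   trans_p = {"Healthy": {"Healthy": 0.7, "Fever": 0.3},
#              "Fever": {"Healthy": 0.4, "Fever": 0.6}}
#   emit_p = {"Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#             "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}}
#   viterbi(observations, states, start_p, trans_p, emit_p)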
def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ):
_validate_not_empty(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
_validate_lists(snake_case_ , snake_case_ )
_validate_dicts(
snake_case_ , snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :Any ):
_validate_list(snake_case_ , '''observations_space''' )
_validate_list(snake_case_ , '''states_space''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :str ):
if not isinstance(_object , snake_case_ ):
__UpperCAmelCase = F'''{var_name} must be a list'''
raise ValueError(snake_case_ )
else:
for x in _object:
if not isinstance(snake_case_ , snake_case_ ):
__UpperCAmelCase = F'''{var_name} must be a list of strings'''
raise ValueError(snake_case_ )
def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ):
_validate_dict(snake_case_ , '''initial_probabilities''' , snake_case_ )
_validate_nested_dict(snake_case_ , '''transition_probabilities''' )
_validate_nested_dict(snake_case_ , '''emission_probabilities''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :str ):
_validate_dict(_object , snake_case_ , snake_case_ )
for x in _object.values():
_validate_dict(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Any , snake_case_ :str , snake_case_ :type , snake_case_ :bool = False ):
if not isinstance(_object , snake_case_ ):
__UpperCAmelCase = F'''{var_name} must be a dict'''
raise ValueError(snake_case_ )
if not all(isinstance(snake_case_ , snake_case_ ) for x in _object ):
__UpperCAmelCase = F'''{var_name} all keys must be strings'''
raise ValueError(snake_case_ )
if not all(isinstance(snake_case_ , snake_case_ ) for x in _object.values() ):
__UpperCAmelCase = '''nested dictionary ''' if nested else ''''''
__UpperCAmelCase = F'''{var_name} {nested_text}all values must be {value_type.__name__}'''
raise ValueError(snake_case_ )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 49 |
from __future__ import annotations
import math
def _A ( __snake_case :int , __snake_case :int , __snake_case :bool , __snake_case :list[int] , __snake_case :float ) -> int:
"""simple docstring"""
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if len(__snake_case ) == 0:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) , )
return min(
minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) , )
def _A ( ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [90, 23, 6, 33, 21, 65, 123, 3_4423]
__SCREAMING_SNAKE_CASE = math.log(len(__snake_case ) , 2 )
print("Optimal value : " , end="" )
print(minimax(0 , 0 , __snake_case , __snake_case , __snake_case ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
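# Worked through for the demo above: with scores
# [90, 23, 6, 33, 21, 65, 123, 34423] the tree height is log2(8) = 3, and
# with the maximiser moving first the alternating tree evaluates to
#   max(min(max(90, 23), max(6, 33)), min(max(21, 65), max(123, 34423)))
#   = max(min(90, 33), min(65, 34423))
#   = max(33, 65)
#   = 65
# which is the optimal value the script prints.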
| 693 | 0 |
'''simple docstring'''
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
lowerCamelCase__ = 'scheduler_config.json'
class _lowerCAmelCase ( __A ):
'''simple docstring'''
snake_case_ = 1
snake_case_ = 2
snake_case_ = 3
snake_case_ = 4
snake_case_ = 5
snake_case_ = 6
snake_case_ = 7
snake_case_ = 8
snake_case_ = 9
snake_case_ = 10
snake_case_ = 11
snake_case_ = 12
snake_case_ = 13
snake_case_ = 14
@dataclass
class _lowerCAmelCase ( __A ):
'''simple docstring'''
snake_case_ = 42
class _lowerCAmelCase :
'''simple docstring'''
snake_case_ = SCHEDULER_CONFIG_NAME
snake_case_ = []
snake_case_ = True
@classmethod
def __lowercase ( cls : Optional[int] , UpperCamelCase_ : Dict[str, Any] = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : int=False , **UpperCamelCase_ : str , ) -> Any:
'''simple docstring'''
_lowercase : int = cls.load_config(
pretrained_model_name_or_path=UpperCamelCase_ , subfolder=UpperCamelCase_ , return_unused_kwargs=UpperCamelCase_ , return_commit_hash=UpperCamelCase_ , **UpperCamelCase_ , )
return cls.from_config(UpperCamelCase_ , return_unused_kwargs=UpperCamelCase_ , **UpperCamelCase_ )
def __lowercase ( self : int , UpperCamelCase_ : Union[str, os.PathLike] , UpperCamelCase_ : bool = False , **UpperCamelCase_ : Tuple ) -> Dict:
'''simple docstring'''
self.save_config(save_directory=UpperCamelCase_ , push_to_hub=UpperCamelCase_ , **UpperCamelCase_ )
@property
def __lowercase ( self : Any ) -> Dict:
'''simple docstring'''
return self._get_compatibles()
@classmethod
def __lowercase ( cls : List[Any] ) -> Any:
'''simple docstring'''
_lowercase : Optional[Any] = list(set([cls.__name__] + cls._compatibles ) )
_lowercase : str = importlib.import_module(__name__.split('''.''' )[0] )
_lowercase : Any = [
getattr(UpperCamelCase_ , UpperCamelCase_ ) for c in compatible_classes_str if hasattr(UpperCamelCase_ , UpperCamelCase_ )
]
return compatible_classes
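# A short sketch of how the `compatibles` property above is typically used;
# the concrete scheduler class is only an illustrative choice:
#
#   from diffusers import DDPMScheduler
#   scheduler = DDPMScheduler()
#   scheduler.compatibles   # scheduler classes that can load this config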
| 712 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
lowerCamelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def _SCREAMING_SNAKE_CASE( snake_case_ : str ) ->Optional[Any]:
'''simple docstring'''
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
_lowercase : Optional[int] = model_type_to_module_name(snake_case_ )
_lowercase : Optional[Any] = importlib.import_module(F".{module_name}" , '''transformers.models''' )
try:
return getattr(snake_case_ , snake_case_ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(snake_case_ , '''__name__''' , snake_case_ ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
_lowercase : int = importlib.import_module('''transformers''' )
if hasattr(snake_case_ , snake_case_ ):
return getattr(snake_case_ , snake_case_ )
return None
def _SCREAMING_SNAKE_CASE( snake_case_ : Union[str, os.PathLike] , snake_case_ : Optional[Union[str, os.PathLike]] = None , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : Optional[Dict[str, str]] = None , snake_case_ : Optional[Union[bool, str]] = None , snake_case_ : Optional[str] = None , snake_case_ : bool = False , **snake_case_ : int , ) ->Union[str, Any]:
'''simple docstring'''
_lowercase : Dict = get_file_from_repo(
snake_case_ , snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , resume_download=snake_case_ , proxies=snake_case_ , use_auth_token=snake_case_ , revision=snake_case_ , local_files_only=snake_case_ , )
if resolved_config_file is None:
logger.info(
'''Could not locate the image processor configuration file, will try to use the model config instead.''' )
return {}
with open(snake_case_ , encoding='''utf-8''' ) as reader:
return json.load(snake_case_ )
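# A usage sketch for the helper above (upstream name
# `get_image_processor_config`; the obfuscated `def` line reuses a
# placeholder). It returns the raw preprocessor config as a dict, or {} when
# the repo ships none:
#
#   config_dict = get_image_processor_config("google/vit-base-patch16-224")
#   # older repos may expose "feature_extractor_type" instead
#   config_dict.get("image_processor_type") or config_dict.get("feature_extractor_type")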
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : int ) -> Tuple:
'''simple docstring'''
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(UpperCamelCase_ )
def __lowercase ( cls : str , UpperCamelCase_ : Dict , **UpperCamelCase_ : Any ) -> Tuple:
'''simple docstring'''
_lowercase : int = kwargs.pop('''config''' , UpperCamelCase_ )
_lowercase : Union[str, Any] = kwargs.pop('''trust_remote_code''' , UpperCamelCase_ )
_lowercase : str = True
        _lowercase , _lowercase = ImageProcessingMixin.get_image_processor_dict(UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Any = config_dict.get('''image_processor_type''' , UpperCamelCase_ )
_lowercase : List[str] = None
if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ):
_lowercase : List[str] = config_dict['''auto_map''']['''AutoImageProcessor''']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
_lowercase : str = config_dict.pop('''feature_extractor_type''' , UpperCamelCase_ )
if feature_extractor_class is not None:
logger.warning(
'''Could not find image processor class in the image processor config or the model config. Loading'''
''' based on pattern matching with the model\'s feature extractor configuration.''' )
_lowercase : Any = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' )
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
_lowercase : List[str] = config_dict['''auto_map''']['''AutoFeatureExtractor''']
_lowercase : List[str] = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' )
logger.warning(
'''Could not find image processor auto map in the image processor config or the model config.'''
''' Loading based on pattern matching with the model\'s feature extractor configuration.''' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_lowercase : Tuple = AutoConfig.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
# It could be in `config.image_processor_type``
_lowercase : Optional[int] = getattr(UpperCamelCase_ , '''image_processor_type''' , UpperCamelCase_ )
if hasattr(UpperCamelCase_ , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map:
_lowercase : List[Any] = config.auto_map['''AutoImageProcessor''']
if image_processor_class is not None:
_lowercase : int = image_processor_class_from_name(UpperCamelCase_ )
_lowercase : str = image_processor_auto_map is not None
_lowercase : List[str] = image_processor_class is not None or type(UpperCamelCase_ ) in IMAGE_PROCESSOR_MAPPING
_lowercase : Tuple = resolve_trust_remote_code(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if has_remote_code and trust_remote_code:
_lowercase : Dict = get_class_from_dynamic_module(
UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : List[str] = kwargs.pop('''code_revision''' , UpperCamelCase_ )
if os.path.isdir(UpperCamelCase_ ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
elif image_processor_class is not None:
return image_processor_class.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(UpperCamelCase_ ) in IMAGE_PROCESSOR_MAPPING:
_lowercase : List[str] = IMAGE_PROCESSOR_MAPPING[type(UpperCamelCase_ )]
return image_processor_class.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
raise ValueError(
F"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
F"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
F"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}" )
@staticmethod
def __lowercase ( UpperCamelCase_ : Dict , UpperCamelCase_ : Dict ) -> Optional[int]:
'''simple docstring'''
IMAGE_PROCESSOR_MAPPING.register(UpperCamelCase_ , UpperCamelCase_ )
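# A usage sketch for the factory above; `from_pretrained` resolves the
# concrete class through the mapping defined at the top of this file:
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   type(processor).__name__   # 'ViTImageProcessor', per the ('vit', ...) entry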
| 411 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE :
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple=13 , __SCREAMING_SNAKE_CASE : Tuple=30 , __SCREAMING_SNAKE_CASE : List[str]=2 , __SCREAMING_SNAKE_CASE : Dict=3 , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : List[Any]=32 , __SCREAMING_SNAKE_CASE : int=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=4 , __SCREAMING_SNAKE_CASE : Dict=37 , __SCREAMING_SNAKE_CASE : Optional[Any]="gelu" , __SCREAMING_SNAKE_CASE : List[Any]=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Any=10 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , __SCREAMING_SNAKE_CASE : Dict=3 , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , ) -> Dict:
a_ : List[str] = parent
a_ : str = batch_size
a_ : Any = image_size
a_ : List[Any] = patch_size
a_ : Dict = num_channels
a_ : Union[str, Any] = is_training
a_ : Dict = use_labels
a_ : Optional[int] = hidden_size
a_ : Optional[Any] = num_hidden_layers
a_ : int = num_attention_heads
a_ : Union[str, Any] = intermediate_size
a_ : Optional[Any] = hidden_act
a_ : Any = hidden_dropout_prob
a_ : Optional[int] = attention_probs_dropout_prob
a_ : Optional[Any] = type_sequence_label_size
a_ : Dict = initializer_range
a_ : int = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
a_ : Any = (image_size // patch_size) ** 2
a_ : Dict = num_patches + 1
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
a_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ : List[str] = None
if self.use_labels:
a_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ : Any = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any ) -> Optional[Any]:
a_ : Tuple = TFViTModel(config=__SCREAMING_SNAKE_CASE )
a_ : Dict = model(__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
a_ : List[Any] = self.image_size // 2
a_ : int = pixel_values[:, :, :image_size, :image_size]
a_ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , interpolate_pos_encoding=__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
a_ : Tuple = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> int:
a_ : int = self.type_sequence_label_size
a_ : int = TFViTForImageClassification(__SCREAMING_SNAKE_CASE )
a_ : Any = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
a_ : List[str] = self.image_size // 2
a_ : List[str] = pixel_values[:, :, :image_size, :image_size]
a_ : int = model(__SCREAMING_SNAKE_CASE , interpolate_pos_encoding=__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
a_ : int = 1
a_ : Union[str, Any] = TFViTForImageClassification(__SCREAMING_SNAKE_CASE )
a_ : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a_ : str = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
a_ : Any = self.prepare_config_and_inputs()
        a_ , a_ , a_ = config_and_inputs
a_ : Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
snake_case__ = (
{"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
if is_tf_available()
else {}
)
snake_case__ = False
snake_case__ = False
snake_case__ = False
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
a_ : Tuple = TFViTModelTester(self )
a_ : List[Any] = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self : str ) -> str:
pass
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
        a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : Union[str, Any] = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
a_ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , tf.keras.layers.Layer ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
        a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : str = model_class(__SCREAMING_SNAKE_CASE )
a_ : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ : Tuple = [*signature.parameters.keys()]
a_ : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
a_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
a_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
a_ : str = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( ):
a_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
a_ : List[Any] = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' )
a_ : Any = self.default_image_processor
a_ : Any = prepare_img()
a_ : Optional[int] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''tf''' )
# forward pass
a_ : List[Any] = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
a_ : Union[str, Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
a_ : int = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 )
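# A sketch of the `interpolate_pos_encoding` path exercised by the tests
# above: it resizes the learned position embeddings so a ViT trained at
# 224x224 can accept other resolutions.
#
#   import tensorflow as tf
#   from transformers import TFViTModel
#   model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
#   pixels = tf.random.uniform((1, 3, 480, 480))
#   out = model(pixels, interpolate_pos_encoding=True)
#   out.last_hidden_state.shape   # (1, 1 + (480 // 16) ** 2, 768) == (1, 901, 768)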
| 466 |
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def _UpperCAmelCase ( __A : int , __A : Tuple="shi-labs/oneformer_demo" ):
with open(hf_hub_download(__A , __A , repo_type='''dataset''' ) , '''r''' ) as f:
a_ : Optional[Any] = json.load(__A )
a_ : List[Any] = {}
a_ : List[Any] = []
a_ : Tuple = []
for key, info in class_info.items():
a_ : Tuple = info['''name''']
class_names.append(info['''name'''] )
if info["isthing"]:
thing_ids.append(int(__A ) )
a_ : Optional[Any] = thing_ids
a_ : str = class_names
return metadata
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str]=7 , __SCREAMING_SNAKE_CASE : List[Any]=3 , __SCREAMING_SNAKE_CASE : List[Any]=30 , __SCREAMING_SNAKE_CASE : str=400 , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : str=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Optional[int]=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Optional[int]=10 , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : Optional[int]=255 , __SCREAMING_SNAKE_CASE : List[Any]="shi-labs/oneformer_demo" , __SCREAMING_SNAKE_CASE : List[str]="ade20k_panoptic.json" , __SCREAMING_SNAKE_CASE : List[Any]=10 , ) -> Dict:
a_ : int = parent
a_ : Optional[Any] = batch_size
a_ : str = num_channels
a_ : Tuple = min_resolution
a_ : List[Any] = max_resolution
a_ : List[Any] = do_resize
a_ : Union[str, Any] = {'''shortest_edge''': 32, '''longest_edge''': 1333} if size is None else size
a_ : Dict = do_normalize
a_ : Union[str, Any] = image_mean
a_ : Dict = image_std
a_ : int = class_info_file
a_ : List[Any] = prepare_metadata(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Optional[int] = num_text
a_ : Any = repo_path
# for the post_process_functions
a_ : List[str] = 2
a_ : Tuple = 10
a_ : Union[str, Any] = 10
a_ : Dict = 3
a_ : int = 4
a_ : Optional[Any] = num_labels
a_ : Union[str, Any] = do_reduce_labels
a_ : Tuple = ignore_index
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def SCREAMING_SNAKE_CASE ( self : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str]=False ) -> Optional[Any]:
if not batched:
a_ : List[Any] = image_inputs[0]
if isinstance(__SCREAMING_SNAKE_CASE , Image.Image ):
                a_ , a_ = image.size
else:
                a_ , a_ = image.shape[1], image.shape[2]
if w < h:
a_ : int = int(self.size['''shortest_edge'''] * h / w )
a_ : Union[str, Any] = self.size['''shortest_edge''']
elif w > h:
a_ : Any = self.size['''shortest_edge''']
a_ : Any = int(self.size['''shortest_edge'''] * w / h )
else:
a_ : Optional[int] = self.size['''shortest_edge''']
a_ : int = self.size['''shortest_edge''']
else:
a_ : int = []
for image in image_inputs:
                a_ , a_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
a_ : List[str] = max(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : item[0] )[0]
a_ : str = max(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
snake_case__ = image_processing_class
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
a_ : Optional[int] = OneFormerImageProcessorTester(self )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
return self.image_processing_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
a_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_mean''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_std''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_normalize''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''ignore_index''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''class_info_file''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''num_text''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''repo_path''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''metadata''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_reduce_labels''' ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
pass
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
# Initialize image_processor
a_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ : Optional[int] = prepare_image_inputs(self.image_processing_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
a_ : Dict = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
        a_ , a_ = self.image_processing_tester.get_expected_values(__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
        a_ , a_ = self.image_processing_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE )
a_ : Any = image_processor(
__SCREAMING_SNAKE_CASE , ['''semantic'''] * len(__SCREAMING_SNAKE_CASE ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
# Initialize image_processor
a_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a_ : Tuple = prepare_image_inputs(self.image_processing_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
a_ : List[Any] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
        a_ , a_ = self.image_processing_tester.get_expected_values(__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
        a_ , a_ = self.image_processing_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE )
a_ : Tuple = image_processor(
__SCREAMING_SNAKE_CASE , ['''semantic'''] * len(__SCREAMING_SNAKE_CASE ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
# Initialize image_processor
a_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a_ : Tuple = prepare_image_inputs(self.image_processing_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
a_ : str = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
        a_ , a_ = self.image_processing_tester.get_expected_values(__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
        a_ , a_ = self.image_processing_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE )
a_ : int = image_processor(
__SCREAMING_SNAKE_CASE , ['''semantic'''] * len(__SCREAMING_SNAKE_CASE ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : str="np" ) -> Any:
a_ : Dict = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
a_ : Optional[Any] = self.image_processing_tester.num_labels
a_ : Union[str, Any] = None
a_ : int = None
a_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
if with_segmentation_maps:
a_ : List[str] = num_labels
if is_instance_map:
a_ : str = list(range(__SCREAMING_SNAKE_CASE ) ) * 2
a_ : str = dict(enumerate(__SCREAMING_SNAKE_CASE ) )
a_ : List[str] = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
a_ : Any = [Image.fromarray(__SCREAMING_SNAKE_CASE ) for annotation in annotations]
a_ : Dict = image_processor(
__SCREAMING_SNAKE_CASE , ['''semantic'''] * len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , return_tensors='''pt''' , instance_id_to_semantic_id=__SCREAMING_SNAKE_CASE , pad_and_return_pixel_mask=__SCREAMING_SNAKE_CASE , )
return inputs
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
pass
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
def common(__SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : List[str]=None ):
a_ : Tuple = self.comm_get_image_processor_inputs(
with_segmentation_maps=__SCREAMING_SNAKE_CASE , is_instance_map=__SCREAMING_SNAKE_CASE , segmentation_type=__SCREAMING_SNAKE_CASE )
a_ : List[Any] = inputs['''mask_labels''']
a_ : Any = inputs['''class_labels''']
a_ : Any = inputs['''pixel_values''']
a_ : Optional[Any] = inputs['''text_inputs''']
# check the batch_size
for mask_label, class_label, text_input in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=__SCREAMING_SNAKE_CASE )
common(is_instance_map=__SCREAMING_SNAKE_CASE , segmentation_type='''pil''' )
common(is_instance_map=__SCREAMING_SNAKE_CASE , segmentation_type='''pil''' )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
a_ : int = np.zeros((20, 50) )
a_ : Dict = 1
a_ : Optional[Any] = 1
a_ : Dict = 1
a_ : Tuple = binary_mask_to_rle(__SCREAMING_SNAKE_CASE )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
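        # Note on the run-length format checked above: `binary_mask_to_rle`
        # flattens the mask row-major and returns alternating (1-indexed run
        # start, run length) pairs, so a mask with two runs of ones yields a
        # list of length 4.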
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
a_ : List[str] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
a_ : Any = self.image_processing_tester.get_fake_oneformer_outputs()
a_ : str = fature_extractor.post_process_semantic_segmentation(__SCREAMING_SNAKE_CASE )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
a_ : Dict = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
a_ : List[str] = fature_extractor.post_process_semantic_segmentation(__SCREAMING_SNAKE_CASE , target_sizes=__SCREAMING_SNAKE_CASE )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
a_ : Optional[Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
a_ : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
a_ : Tuple = image_processor.post_process_instance_segmentation(__SCREAMING_SNAKE_CASE , threshold=0 )
self.assertTrue(len(__SCREAMING_SNAKE_CASE ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
a_ : List[Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
a_ : int = self.image_processing_tester.get_fake_oneformer_outputs()
a_ : str = image_processor.post_process_panoptic_segmentation(__SCREAMING_SNAKE_CASE , threshold=0 )
self.assertTrue(len(__SCREAMING_SNAKE_CASE ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 466 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( UpperCamelCase__ :int = 100_0000 ) -> int:
snake_case__ : Optional[int] = 1
snake_case__ : str = 1
snake_case__ : List[str] = {1: 1}
for inputa in range(2 , _A ):
snake_case__ : List[str] = 0
snake_case__ : List[Any] = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
snake_case__ : Tuple = (3 * number) + 1
counter += 1
if inputa not in counters:
snake_case__ : int = counter
if counter > pre_counter:
snake_case__ : Union[str, Any] = inputa
snake_case__ : str = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
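# A compact, self-contained variant of the memoised chain-length idea above,
# handy for sanity-checking single inputs (the shared dict default is a
# deliberate cross-call cache):
def collatz_chain_length(n: int, _cache: dict = {1: 1}) -> int:
    if n not in _cache:
        nxt = n // 2 if n % 2 == 0 else 3 * n + 1
        _cache[n] = 1 + collatz_chain_length(nxt)
    return _cache[n]
assert collatz_chain_length(13) == 10  # 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1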
| 709 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Any ={
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict =["ConditionalDetrFeatureExtractor"]
_lowercase : Optional[int] =["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] =[
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
_lowercase : Union[str, Any] =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 574 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class A_ :
'''simple docstring'''
def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=99 , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=16 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ):
lowercase = parent
lowercase = batch_size
lowercase = seq_length
lowercase = is_training
lowercase = use_token_type_ids
lowercase = use_labels
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = type_sequence_label_size
lowercase = initializer_range
lowercase = num_labels
lowercase = num_choices
lowercase = scope
lowercase = self.vocab_size - 1
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase = None
if self.use_token_type_ids:
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase = None
lowercase = None
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase = ids_tensor([self.batch_size] , self.num_choices )
lowercase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowercase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , *snake_case ):
lowercase = OpenAIGPTModel(config=snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , token_type_ids=snake_case , head_mask=snake_case )
lowercase = model(snake_case , token_type_ids=snake_case )
lowercase = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , *snake_case ):
lowercase = OpenAIGPTLMHeadModel(snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , *snake_case ):
lowercase = OpenAIGPTDoubleHeadsModel(snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , *snake_case ):
lowercase = self.num_labels
lowercase = OpenAIGPTForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase = model(snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_torch
class A_ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
_UpperCamelCase : Tuple = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
_UpperCamelCase : str = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def SCREAMING_SNAKE_CASE__ ( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def SCREAMING_SNAKE_CASE__ ( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=torch_device , )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=torch_device , )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self ):
        self.model_tester = OpenAIGPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenAIGPTConfig , n_embd=37 )
def SCREAMING_SNAKE_CASE__ ( self ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*snake_case )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
        model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
        model.to(torch_device )
        input_ids = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=torch_device ) # the president is
        expected_output_ids = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
4_0477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
| 84 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowerCamelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
a : Union[str, Any] = BarthezTokenizer
a : Any = BarthezTokenizerFast
a : Tuple = True
a : List[Any] = True
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
super().setUp()
__lowercase = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=False )
__lowercase = tokenizer
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
__lowercase = """<pad>"""
__lowercase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
__lowercase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(A_ ) , 1_0_1_1_2_2 )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2 )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
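        # A padded batch of two sentences should encode to (2, 6) tensors whose
        # first row matches the expected token ids.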
__lowercase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__lowercase = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2]
__lowercase = self.tokenizer(
A_ , max_length=len(A_ ) , padding=A_ , truncation=A_ , return_tensors="""pt""" )
self.assertIsInstance(A_ , A_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__lowercase = batch.input_ids.tolist()[0]
self.assertListEqual(A_ , A_ )
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer()
__lowercase = """I was born in 92000, and this is falsé."""
__lowercase = tokenizer.tokenize(A_ )
__lowercase = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
__lowercase = tokenizer.encode(A_ , add_special_tokens=A_ )
__lowercase = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
__lowercase = self.get_rust_tokenizer()
__lowercase = tokenizer.encode(A_ )
__lowercase = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = {"""input_ids""": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
__lowercase = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=A_ , )
| 616 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_speech_to_text'''] = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_speech_to_text'''] = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_speech_to_text'''] = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_speech_to_text'''] = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowerCamelCase : List[Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 237 |
"""simple docstring"""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
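# reversible_numbers fills the digit pairs of an n-digit number from the outside
# in, threading the carry of digit[i] + digit[length - i - 1] through
# ``remainder``; a number counts when every digit of n + reverse(n) is odd.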
def reversible_numbers ( remaining_length : int , remainder : int , digits : list[int] , length : int ) -> int:
    '''simple docstring'''
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1 , -1 , -1 ):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10 ):
            digits[length // 2] = digit
            result += reversible_numbers(
                0 , (remainder + 2 * digit) // 10 , digits , length )
        return result
    result = 0
    for digita in range(10 ):
        digits[(length + remaining_length) // 2 - 1] = digita
        if (remainder + digita) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digitb in other_parity_digits:
            digits[(length - remaining_length) // 2] = digitb
            result += reversible_numbers(
                remaining_length - 2 , (remainder + digita + digitb) // 10 , digits , length , )
    return result
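# solution() sums the reversible-number counts over every digit length up to max_power.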
def solution ( max_power : int = 9 ) -> int:
    '''simple docstring'''
    result = 0
    for length in range(1 , max_power + 1 ):
        result += reversible_numbers(length , 0 , [0] * length , length )
    return result
if __name__ == "__main__":
print(F'{solution() = }')
| 237 | 1 |
'''simple docstring'''
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
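# Best-first grid search: repeatedly expand the open cell with the smallest
# f = g + heuristic, mark visited cells in ``closed``, and record the move taken
# into each cell in ``action`` so the path can be walked backwards from the goal.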
def search ( grid , init , goal , cost , heuristic , ) -> tuple[list[list[int]], list[list[int]]]:
    """simple docstring"""
    closed = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell ) == 0:
            raise ValueError("""Algorithm is unable to find solution""" )
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS ) ):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid ) and ya >= 0 and ya < len(grid[0] ):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya] )
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y] )  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y] )
    path = []
    for i in range(len(invpath ) ):
        path.append(invpath[len(invpath ) - 1 - i] )
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path , action = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 26 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __lowercase ( DiffusionPipeline ):
"""simple docstring"""
    def __init__( self , vqvae , unet , scheduler ) -> Any:
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size = 1 , generator = None , eta = 0.0 , num_inference_steps = 50 , output_type = "pil" , return_dict = True , **kwargs , ) -> Union[Tuple, ImagePipelineOutput]:
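        # Unconditional latent sampling: draw Gaussian latents, scale them for the
        # scheduler, run the reverse-diffusion loop with the UNet's noise
        # predictions, then decode the final latents to images with the VQ-VAE.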
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=generator , )
        latents = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps )
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['''eta'''] = eta
        for t in self.progress_bar(self.scheduler.timesteps ):
            latent_model_input = self.scheduler.scale_model_input(latents , t )
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VAE
        image = self.vqvae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 542 | 0 |
import math
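# Peel off octal digits with ``num % 8`` and place each at the next decimal
# position of ``octal``, so 65 -> 0o101 is built as 1*100 + 0*10 + 1.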
def decimal_to_octal ( num : int ) -> str:
    """simple docstring"""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10 , counter ) ))
        counter += 1
        num = math.floor(num / 8 )  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return F"0o{int(octal )}"
def main ( ) -> None:
"""simple docstring"""
print("\n2 in octal is:" )
print(decimal_to_octal(2 ) ) # = 2
print("\n8 in octal is:" )
print(decimal_to_octal(8 ) ) # = 10
print("\n65 in octal is:" )
print(decimal_to_octal(65 ) ) # = 101
print("\n216 in octal is:" )
print(decimal_to_octal(216 ) ) # = 330
print("\n512 in octal is:" )
print(decimal_to_octal(512 ) ) # = 1000
print("\n" )
if __name__ == "__main__":
main()
| 115 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , )-> Any:
'''simple docstring'''
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_input_mask
lowerCAmelCase__ = use_token_type_ids
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = num_choices
lowerCAmelCase__ = scope
def UpperCAmelCase ( self )-> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = None
if self.use_token_type_ids:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self )-> Optional[Any]:
'''simple docstring'''
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )-> int:
'''simple docstring'''
lowerCAmelCase__ = NystromformerModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
lowerCAmelCase__ = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
lowerCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )-> str:
'''simple docstring'''
lowerCAmelCase__ = NystromformerForMaskedLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )-> Any:
'''simple docstring'''
lowerCAmelCase__ = NystromformerForQuestionAnswering(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = NystromformerForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )-> str:
'''simple docstring'''
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = NystromformerForTokenClassification(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = self.num_choices
lowerCAmelCase__ = NystromformerForMultipleChoice(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class lowercase__ ( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
a_ =(
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
a_ =(
{
"""feature-extraction""": NystromformerModel,
"""fill-mask""": NystromformerForMaskedLM,
"""question-answering""": NystromformerForQuestionAnswering,
"""text-classification""": NystromformerForSequenceClassification,
"""token-classification""": NystromformerForTokenClassification,
"""zero-shot""": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ =False
a_ =False
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
        self.model_tester = NystromformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=NystromformerConfig , hidden_size=37 )
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def UpperCAmelCase ( self )-> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase__ = type
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase )
def UpperCAmelCase ( self )-> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase )
@slow
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class lowercase__ ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
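        # Run a toy 6-token input through the pretrained checkpoint and compare a
        # 3x3 slice of the last hidden state against reference values.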
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512" )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 6, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.4_532, -0.0_936, 0.5_137], [-0.2_676, 0.0_628, 0.6_186], [-0.3_629, -0.1_726, 0.4_716]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
@slow
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
lowerCAmelCase__ = "the [MASK] of Belgium is Brussels"
lowerCAmelCase__ = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512" )
lowerCAmelCase__ = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512" )
lowerCAmelCase__ = tokenizer(__UpperCAmelCase , return_tensors="pt" )
with torch.no_grad():
lowerCAmelCase__ = model(encoding.input_ids ).logits
lowerCAmelCase__ = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(__UpperCAmelCase ) , "capital" )
| 115 | 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a__ ( ProcessorMixin ):
lowerCamelCase : Optional[int] =["image_processor", "tokenizer"]
lowerCamelCase : List[str] ="ViltImageProcessor"
lowerCamelCase : List[Any] =("BertTokenizer", "BertTokenizerFast")
    def __init__( self : Optional[int] , image_processor : Tuple=None , tokenizer : Any=None , **kwargs : Tuple ):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
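    # __call__ tokenizes the text, then folds the image features (pixel_values
    # and pixel_mask) into the same BatchEncoding.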
    def __call__( self : List[str] , images , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_token_type_ids : Optional[bool] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs : Optional[Any] , ):
        """simple docstring"""
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        encoding.update(encoding_image_processor )
        return encoding
    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , *args : List[str] , **kwargs : Union[str, Any] ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def SCREAMING_SNAKE_CASE__ ( self : List[str] , *args : Any , **kwargs : List[str] ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
@property
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
return self.image_processor
| 546 |
'''simple docstring'''
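# decimal_isolate returns the fractional part of ``number``, rounded to
# ``digit_amount`` decimal places when digit_amount > 0.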
def decimal_isolate ( number , digit_amount ) -> float:
    if digit_amount > 0:
        return round(number - int(number ) , digit_amount )
    return number - int(number )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 546 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class lowerCAmelCase__( PretrainedConfig ):
'''simple docstring'''
A_ : List[str] = 'blip_text_model'
def __init__( self : Optional[int] , __snake_case : Any=30_524 , __snake_case : str=768 , __snake_case : Dict=768 , __snake_case : Dict=3_072 , __snake_case : List[Any]=768 , __snake_case : Optional[Any]=12 , __snake_case : int=8 , __snake_case : Tuple=512 , __snake_case : Optional[int]="gelu" , __snake_case : List[str]=1E-12 , __snake_case : Union[str, Any]=0.0 , __snake_case : Dict=0.0 , __snake_case : List[str]=0.02 , __snake_case : Optional[Any]=30_522 , __snake_case : List[str]=2 , __snake_case : str=0 , __snake_case : Union[str, Any]=102 , __snake_case : Any=True , __snake_case : Tuple=True , **__snake_case : List[str] , ):
'''simple docstring'''
super().__init__(
pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , sep_token_id=__snake_case , **__snake_case , )
UpperCAmelCase_ : Tuple = vocab_size
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : Optional[Any] = encoder_hidden_size
UpperCAmelCase_ : Optional[Any] = intermediate_size
UpperCAmelCase_ : int = projection_dim
UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase_ : List[Any] = num_hidden_layers
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : Optional[int] = max_position_embeddings
UpperCAmelCase_ : Dict = layer_norm_eps
UpperCAmelCase_ : Optional[int] = hidden_act
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : Tuple = attention_probs_dropout_prob
UpperCAmelCase_ : Any = is_decoder
UpperCAmelCase_ : int = use_cache
@classmethod
def _lowerCamelCase ( cls : Any , __snake_case : Union[str, os.PathLike] , **__snake_case : str ):
'''simple docstring'''
cls._set_token_in_kwargs(__snake_case )
UpperCAmelCase_ : List[Any] = cls.get_config_dict(__snake_case , **__snake_case )
# get the text config dict if we are loading from BlipConfig
if config_dict.get('''model_type''' ) == "blip":
UpperCAmelCase_ : Optional[int] = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__snake_case , **__snake_case )
class lowerCAmelCase__( PretrainedConfig ):
'''simple docstring'''
A_ : List[Any] = 'blip_vision_model'
def __init__( self : Union[str, Any] , __snake_case : int=768 , __snake_case : str=3_072 , __snake_case : Dict=512 , __snake_case : Optional[Any]=12 , __snake_case : Any=12 , __snake_case : Union[str, Any]=384 , __snake_case : Tuple=16 , __snake_case : Union[str, Any]="gelu" , __snake_case : Tuple=1E-5 , __snake_case : List[Any]=0.0 , __snake_case : Dict=1E-10 , **__snake_case : Any , ):
'''simple docstring'''
super().__init__(**__snake_case )
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : int = intermediate_size
UpperCAmelCase_ : List[Any] = projection_dim
UpperCAmelCase_ : str = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : str = patch_size
UpperCAmelCase_ : str = image_size
UpperCAmelCase_ : Dict = initializer_range
UpperCAmelCase_ : Any = attention_dropout
UpperCAmelCase_ : Optional[Any] = layer_norm_eps
UpperCAmelCase_ : Union[str, Any] = hidden_act
@classmethod
def _lowerCamelCase ( cls : int , __snake_case : Union[str, os.PathLike] , **__snake_case : Tuple ):
'''simple docstring'''
cls._set_token_in_kwargs(__snake_case )
UpperCAmelCase_ : Tuple = cls.get_config_dict(__snake_case , **__snake_case )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('''model_type''' ) == "blip":
UpperCAmelCase_ : Tuple = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__snake_case , **__snake_case )
class lowerCAmelCase__( PretrainedConfig ):
'''simple docstring'''
A_ : Tuple = 'blip'
A_ : List[Any] = True
def __init__( self : List[str] , __snake_case : Optional[Any]=None , __snake_case : Optional[Any]=None , __snake_case : Union[str, Any]=512 , __snake_case : Optional[int]=2.6_592 , __snake_case : Tuple=256 , **__snake_case : Optional[int] , ):
'''simple docstring'''
super().__init__(**__snake_case )
if text_config is None:
UpperCAmelCase_ : Optional[int] = {}
logger.info('''`text_config` is `None`. Initializing the `BlipTextConfig` with default values.''' )
if vision_config is None:
UpperCAmelCase_ : List[Any] = {}
logger.info('''`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.''' )
UpperCAmelCase_ : Tuple = BlipTextConfig(**__snake_case )
UpperCAmelCase_ : Tuple = BlipVisionConfig(**__snake_case )
UpperCAmelCase_ : Any = self.vision_config.hidden_size
UpperCAmelCase_ : str = projection_dim
UpperCAmelCase_ : str = logit_scale_init_value
UpperCAmelCase_ : Optional[int] = 1.0
UpperCAmelCase_ : Dict = 0.02
UpperCAmelCase_ : List[str] = image_text_hidden_size
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , __snake_case : BlipTextConfig , __snake_case : BlipVisionConfig , **__snake_case : Union[str, Any] ):
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__snake_case )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : List[str] = self.text_config.to_dict()
UpperCAmelCase_ : Tuple = self.vision_config.to_dict()
UpperCAmelCase_ : List[str] = self.__class__.model_type
        return output
| 703 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
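# convert_xlm_checkpoint_to_pytorch loads the original XLM checkpoint, nests the
# base-model weights one level deeper, drops tensor-valued params from the
# config, rewrites the BPE vocab, and saves weights / config / vocab files.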
def convert_xlm_checkpoint_to_pytorch ( xlm_checkpoint_path , pytorch_dump_folder_path ):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path , map_location='''cpu''' )
    state_dict = chkpt['''model''']
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict['''transformer.''' + k] = v
    config = chkpt['''params''']
    config = {n: v for n, v in config.items() if not isinstance(v , (torch.FloatTensor, numpy.ndarray) )}
    vocab = chkpt['''dico_word2id''']
    vocab = {s + '''</w>''' if s.find('''@@''' ) == -1 and i > 1_3 else s.replace('''@@''' , '''''' ): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''vocab_file''']
    print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
    torch.save(two_levels_state_dict , pytorch_weights_dump_path )
    print(F'''Save configuration file to {pytorch_config_dump_path}''' )
    with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(config , indent=2 ) + '''\n''' )
    print(F'''Save vocab file to {pytorch_vocab_dump_path}''' )
    with open(pytorch_vocab_dump_path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(vocab , indent=2 ) + '''\n''' )
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCamelCase : Dict = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 641 | 0 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class UpperCamelCase :
'''simple docstring'''
    def __init__( self , prompt = None , choices = [] ):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = '''*'''
        else:
            self.arrow_char = '''➔ '''
    def write_choice ( self , index , end_string = "" ):
        if sys.platform != "win32":
            writeColor(self.choices[index] , 32 , end_string )
        else:
            forceWrite(self.choices[index] , end_string )
    def print_choice ( self , index ):
        if index == self.position:
            forceWrite(f" {self.arrow_char} " )
            self.write_choice(index )
        else:
            forceWrite(f" {self.choices[index]}" )
        reset_cursor()
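    # move_direction shifts the highlight up or down by ``num_spaces`` rows
    # (clamped to the list bounds) and redraws only the two affected lines.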
    def move_direction ( self , direction , num_spaces = 1 ):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices ):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position )
        move_cursor(num_spaces , direction.name )
        self.print_choice(self.position )
@input.mark(KEYMAP['''up'''] )
    def move_up ( self ):
self.move_direction(Direction.UP )
@input.mark(KEYMAP['''down'''] )
    def move_down ( self ):
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP['''newline'''] )
    def select ( self ):
move_cursor(len(self.choices ) - self.position , '''DOWN''' )
return self.position
@input.mark(KEYMAP['''interrupt'''] )
    def interrupt ( self ):
move_cursor(len(self.choices ) - self.position , '''DOWN''' )
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number )] for number in range(10 )] )
    def select_row ( self ):
        index = int(chr(self.current_selection ) )
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices ):
            if self.position > index:
                self.move_direction(Direction.UP , -movement )
            elif self.position < index:
                self.move_direction(Direction.DOWN , movement )
            else:
                return
        else:
            return
    def run ( self , default_choice = 0 ):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt , '''\n''' )
            if in_colab:
                forceWrite('''Please input a choice index (starting from 0), and press enter''' , '''\n''' )
            else:
                forceWrite('''Please select a choice using the arrow or number keys, and selecting with enter''' , '''\n''' )
        self.position = default_choice
        for i in range(len(self.choices ) ):
            self.print_choice(i )
            forceWrite('''\n''' )
        move_cursor(len(self.choices ) - self.position , '''UP''' )
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input() )
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices ) + 1 ):
                        move_cursor(1 , '''UP''' )
                        clear_line()
                    self.write_choice(choice , '''\n''' )
                    return choice
| 257 |
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")
_split_re = r"^\w+(\.\w+)*$"
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
def camelcase_to_snakecase ( name ) -> Any:
    '''simple docstring'''
    name = _uppercase_uppercase_re.sub(R'''\1_\2''' , name )
    name = _lowercase_uppercase_re.sub(R'''\1_\2''' , name )
    return name.lower()
def snakecase_to_camelcase ( name ) -> Dict:
    '''simple docstring'''
    name = _single_underscore_re.split(name )
    name = [_multiple_underscores_re.split(n ) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name ) if n != '''''' )
def filename_prefix_for_name ( name ) -> Optional[int]:
    '''simple docstring'''
    if os.path.basename(name ) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}" )
    return camelcase_to_snakecase(name )
def filename_prefix_for_split ( name , split ) -> Optional[Any]:
    '''simple docstring'''
    if os.path.basename(name ) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}" )
    if not re.match(_split_re , split ):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'." )
    return f"{filename_prefix_for_name(name )}-{split}"
def filepattern_for_dataset_split ( path , dataset_name , split , filetype_suffix=None ) -> List[Any]:
    '''simple docstring'''
    prefix = filename_prefix_for_split(dataset_name , split )
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(path , prefix )
    return f"{filepath}*"
def filenames_for_dataset_split ( path , dataset_name , split , filetype_suffix=None , shard_lengths=None ) -> List[Any]:
    '''simple docstring'''
    prefix = filename_prefix_for_split(dataset_name , split )
    prefix = os.path.join(path , prefix )
    if shard_lengths:
        num_shards = len(shard_lengths )
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards )]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
| 257 | 1 |
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
lowercase = {"UserAgent": UserAgent().random}
def extract_user_profile(script):
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class UpperCamelCase_ :
'''simple docstring'''
    def __init__( self , username ) -> str:
        self.url = F'''https://www.instagram.com/{username}/'''
        self.user_data = self.get_json()
def _UpperCamelCase ( self ) -> dict:
        html = requests.get(self.url , headers=headers ).text
        scripts = BeautifulSoup(html , 'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ) -> str:
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self ) -> str:
return F'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def _UpperCamelCase ( self ) -> str:
return self.user_data["username"]
@property
def _UpperCamelCase ( self ) -> str:
return self.user_data["full_name"]
@property
def _UpperCamelCase ( self ) -> str:
return self.user_data["biography"]
@property
def _UpperCamelCase ( self ) -> str:
return self.user_data["business_email"]
@property
def _UpperCamelCase ( self ) -> str:
return self.user_data["external_url"]
@property
def _UpperCamelCase ( self ) -> int:
return self.user_data["edge_followed_by"]["count"]
@property
def _UpperCamelCase ( self ) -> int:
return self.user_data["edge_follow"]["count"]
@property
def _UpperCamelCase ( self ) -> int:
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def _UpperCamelCase ( self ) -> str:
return self.user_data["profile_pic_url_hd"]
@property
def _UpperCamelCase ( self ) -> bool:
return self.user_data["is_verified"]
@property
def _UpperCamelCase ( self ) -> bool:
return self.user_data["is_private"]
def __UpperCAmelCase ( a_ = "github"):
import os
if os.environ.get('CI'):
return # test failing on GitHub Actions
snake_case_ = InstagramUser(a_)
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , a_)
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_50
assert instagram_user.number_of_followers > 12_00_00
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.')
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase = InstagramUser("github")
print(instagram_user)
print(f'{instagram_user.number_of_posts = }')
print(f'{instagram_user.number_of_followers = }')
print(f'{instagram_user.number_of_followings = }')
print(f'{instagram_user.email = }')
print(f'{instagram_user.website = }')
print(f'{instagram_user.profile_picture_url = }')
print(f'{instagram_user.is_verified = }')
print(f'{instagram_user.is_private = }')
| 706 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
lowercase = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self , a , a , a = None , a = None ) -> int:
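        # Diff every feature script under examples/by_feature against the given
        # complete example and assert nothing is left over (after stripping the
        # optional special strings).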
snake_case_ = None
snake_case_ = os.path.abspath(os.path.join('examples' , 'by_feature' ) )
snake_case_ = os.path.abspath('examples' )
for item in os.listdir(a ):
if item not in EXCLUDE_EXAMPLES:
snake_case_ = os.path.join(a , a )
if os.path.isfile(a ) and ".py" in item_path:
with self.subTest(
tested_script=a , feature_script=a , tested_section='main()' if parser_only else 'training_function()' , ):
snake_case_ = compare_against_test(
os.path.join(a , a ) , a , a , a )
snake_case_ = '\n'.join(a )
if special_strings is not None:
for string in special_strings:
snake_case_ = diff.replace(a , '' )
self.assertEqual(a , '' )
def _UpperCamelCase ( self ) -> Optional[Any]:
        self.one_complete_example('complete_nlp_example.py' , True )
        self.one_complete_example('complete_nlp_example.py' , False )
def _UpperCamelCase ( self ) -> Union[str, Any]:
        cv_path = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) )
        special_strings = [
' ' * 16 + '{\n\n',
' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
' ' * 20 + '"f1": eval_metric["f1"],\n\n',
' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
' ' * 20 + '"epoch": epoch,\n\n',
' ' * 16 + '},\n\n',
' ' * 16 + 'step=epoch,\n',
' ' * 12,
' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
]
        self.one_complete_example('complete_cv_example.py' , True , cv_path , special_strings )
        self.one_complete_example('complete_cv_example.py' , False , cv_path , special_strings )
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class UpperCamelCase_ ( TempDirTestCase ):
'''simple docstring'''
    clear_on_setup = False
@classmethod
def _UpperCamelCase ( cls ) -> Optional[int]:
super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir , 'default_config.yml' )
        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
def _UpperCamelCase ( cls ) -> Optional[Any]:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def _UpperCamelCase ( self ) -> List[str]:
        testargs = F'''
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) )
def _UpperCamelCase ( self ) -> List[Any]:
        testargs = F'''
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
'''.split()
snake_case_ = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) )
def _UpperCamelCase ( self ) -> Optional[Any]:
        testargs = F'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
'''.split()
        output = run_command(self._launch_args + testargs , return_stdout=True )
        self.assertNotIn('epoch 0:' , output )
        self.assertIn('epoch 1:' , output )
def _UpperCamelCase ( self ) -> List[str]:
        testargs = F'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
'''.split()
        output = run_command(self._launch_args + testargs , return_stdout=True )
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn('epoch 0:' , output )
            self.assertIn('epoch 1:' , output )
        else:
            self.assertIn('epoch 0:' , output )
            self.assertIn('epoch 1:' , output )
@slow
def _UpperCamelCase ( self ) -> int:
snake_case_ = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split()
with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ):
            output = run_command(self._launch_args + testargs , return_stdout=True )
            results = re.findall('({.+})' , output )
            results = [r for r in results if 'accuracy' in r][-1]
            results = ast.literal_eval(results )
            self.assertGreaterEqual(results['accuracy'] , 0.75 )
def _UpperCamelCase ( self ) -> Optional[int]:
snake_case_ = ['examples/by_feature/multi_process_metrics.py']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def _UpperCamelCase ( self ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
            testargs = F'''
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
'''.split()
run_command(self._launch_args + testargs )
            self.assertTrue(os.path.exists(os.path.join(tmpdir , 'tracking' ) ) )
def _UpperCamelCase ( self ) -> List[str]:
snake_case_ = ['examples/by_feature/gradient_accumulation.py']
run_command(self._launch_args + testargs )
def _UpperCamelCase ( self ) -> List[str]:
snake_case_ = ['examples/by_feature/local_sgd.py']
run_command(self._launch_args + testargs )
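# For orientation (not part of the original test suite): each test above shells out
# to a command of roughly this shape; the paths and flags shown are illustrative.
#   accelerate launch --config_file <tmpdir>/default_config.yml \
#       examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir <tmpdir>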
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __lowerCAmelCase ( BertTokenizationTest ):
    """simple docstring"""
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
@slow
def lowerCAmelCase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" )
        text = tokenizer.encode("sequence builders" , add_special_tokens=False )
        text_a = tokenizer.encode("multi-sequence build" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
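# Illustrative layout of the asserts above (token ids are hypothetical): with
# text == [7, 8] and text_a == [9], encoded_sentence is [CLS, 7, 8, SEP] and
# encoded_pair is [CLS, 7, 8, SEP, 9, SEP].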
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE :List[Any] = logging.get_logger(__name__)
class __lowerCAmelCase ( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ['pixel_values']
    def __init__( self : Optional[int] , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PIL.Image.BICUBIC , do_center_crop : bool = True , crop_size : Dict[str, int] = None , rescale_factor : Union[int, float] = 1 / 2_5_5 , do_rescale : bool = True , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs : Optional[Any] , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        size = size if size is not None else {"height": 2_5_6, "width": 2_5_6}
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self : List[str] , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PIL.Image.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : int , ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' )
        return resize(
            image , size=(size["height"], size["width"]) , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self : Optional[Any] , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : int , ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale( self : Optional[Any] , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Union[str, Any] , ) -> Optional[Any]:
        """simple docstring"""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self : Optional[Any] , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Any , ) -> np.ndarray:
        """simple docstring"""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self : Optional[Any] , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : int=None , do_center_crop : bool = None , crop_size : Dict[str, int] = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs : Tuple , ) -> PIL.Image.Image:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
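# A minimal usage sketch for the processor above (assumes Pillow is available; the
# 640x480 test image and the "np" tensor type are illustrative choices):
#   processor = __lowerCAmelCase()
#   batch = processor(images=PIL.Image.new("RGB", (640, 480)), return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 224, 224): resized to 256, center-cropped to 224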
'''simple docstring'''
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour :
"""simple docstring"""
    def __init__( self : Union[str, Any] , img : List[Any] , dst_width : int , dst_height : int ) -> List[Any]:
        if dst_width < 0 or dst_height < 0:
            raise ValueError('''Destination width/height should be > 0''' )
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        self.output = (
            np.ones((self.dst_h, self.dst_w, 3) , np.uint8 ) * 255
        )
    def process( self : Any ) -> None:
        for i in range(self.dst_h ):
            for j in range(self.dst_w ):
                self.output[i][j] = self.img[self.get_y(i )][self.get_x(j )]
    def get_x( self : int , x : int ) -> int:
        return int(self.ratio_x * x )
    def get_y( self : Optional[Any] , y : int ) -> int:
        return int(self.ratio_y * y )
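# Mapping sketch (illustrative numbers, not part of the original file): with
# src_w == 800 and dst_w == 400, ratio_x == 2.0, so destination column j samples
# source column int(2.0 * j); e.g. get_x(10) -> 20.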
if __name__ == "__main__":
    dst_w , dst_h = 8_0_0, 6_0_0
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f"""Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}""", n.output
)
waitKey(0)
    destroyAllWindows()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
"processing_layoutlmv2": ["LayoutLMv2Processor"],
"tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
"LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMv2ForQuestionAnswering",
"LayoutLMv2ForSequenceClassification",
"LayoutLMv2ForTokenClassification",
"LayoutLMv2Layer",
"LayoutLMv2Model",
"LayoutLMv2PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
def solution (__A = 600_851_475_143):
    """simple docstring"""
    try:
        n = int(__A)
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''')
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''')
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(F"""{solution() = }""")
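# Quick sanity check (added for illustration, consistent with the Project Euler
# statement): 13195 == 5 * 7 * 13 * 29, so its largest prime factor is 29.
assert solution(13_195) == 29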
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]
def next_number( number ):
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_00_00_00
CHAINS[0] = True  # the chain of 1
CHAINS[57] = False  # the chain of 58, which ends in 89
def chain( number ):
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution( number = 10000000 ):
    for i in range(1 , number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(False )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{solution() = }")
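# Chain sketch (hand-checked, added for illustration): 44 -> 4**2 + 4**2 == 32,
# then 32 -> 13 -> 10 -> 1, so 44 belongs to the chain that ends in 1.
assert next_number(44) == 32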
def is_ip_va_address_valid ( A_):
    octets = [int(i) for i in A_.split(".") if i.isdigit()]
    # a valid IPv4 address has exactly four octets, each in the range 0..255
    return len(octets) == 4 and all(0 <= octet <= 2_55 for octet in octets)
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = '''valid''' if is_ip_va_address_valid(ip) else '''invalid'''
print(f"{ip} is a {valid_or_invalid} IP v4 address.")
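# Hand-checked examples (added for illustration; they run on import):
assert is_ip_va_address_valid("192.168.0.1") is True
assert is_ip_va_address_valid("256.1.2.3") is False  # 256 is out of range
assert is_ip_va_address_valid("1.2.3") is False  # too few octets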
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size" ,[None, 4_00 * 2**20, 6_00 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size" ,["default", 0, 1_00 * 2**20, 9_00 * 2**20])
def lowerCAmelCase_ ( monkeypatch ,dataset_size ,input_in_memory_max_size):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config ,"IN_MEMORY_MAX_SIZE" ,input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '''▁'''
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class __A ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def _snake_case (self ):
super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case (self ):
        token = """<s>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def _snake_case (self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
        self.assertEqual(len(vocab_keys ) , 1002 )
def _snake_case (self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _snake_case (self ):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def _snake_case (self ):
return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
@slow
def _snake_case (self ):
        symbols = """Hello World!"""
        original_tokenizer_encodings = [18536, 2260, 101]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def _snake_case (self ):
        symbols = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@require_torch
@slow
def _snake_case (self ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = """ """.join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors="""pt""" , return_token_type_ids=False )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=False )
        config = BertGenerationConfig()
        model = BertGenerationEncoder(config )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
def _snake_case (self ):
# fmt: off
lowerCamelCase__ : Any = {"""input_ids""": [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
def odd_even_transposition (UpperCamelCase : list ) ->list:
    '''simple docstring'''
    arr_size = len(UpperCamelCase )
    for _ in range(arr_size ):
        for i in range(_ % 2 , arr_size - 1 , 2 ):
            if UpperCamelCase[i + 1] < UpperCamelCase[i]:
                UpperCamelCase[i + 1] , UpperCamelCase[i] = UpperCamelCase[i] , UpperCamelCase[i + 1]
    return UpperCamelCase
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
print(F'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
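# Pass-by-pass sketch for [3, 1, 2] (added for illustration): the even pass swaps
# indices (0, 1) giving [1, 3, 2]; the odd pass swaps (1, 2) giving [1, 2, 3].
assert odd_even_transposition([3, 1, 2]) == [1, 2, 3]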
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
'''Visual-Attention-Network/van-base''': (
'''https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'''
),
}
class __UpperCAmelCase ( PretrainedConfig ):
    '''simple docstring'''
    model_type = "van"
    def __init__( self , image_size=2_2_4 , num_channels=3 , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , depths=[3, 3, 1_2, 3] , mlp_ratios=[8, 8, 4, 4] , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1E-6 , layer_scale_init_value=1E-2 , drop_path_rate=0.0 , dropout_rate=0.0 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
"""simple docstring"""
def _lowerCAmelCase(a : str ) -> str:
    max_length = 0
    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ''''''
    output_string = ''''''
    # append each character + "|" in new_string for range(0, length-1)
    for i in a[: len(a ) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += a[-1]
    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l , r = 0, 0
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string ) )]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string ) ):
        k = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
        while (
            j - k >= 0
            and j + k < len(new_input_string )
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
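# Hand-checked examples (added for illustration; they run on import):
assert _lowerCAmelCase("cbbd") == "bb"
assert _lowerCAmelCase("ababa") == "ababa"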
def solution ( n = 1_00 ):
    """simple docstring"""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f"""{solution() = }""")
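# Worked check (added for illustration): for n == 10 the sum of squares is 385 and
# the square of the sum is 55**2 == 3025, so the difference is 2640.
assert solution(10) == 2_640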
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( PretrainedConfig ):
    model_type = '''longformer'''
    def __init__( self , attention_window = 512 , sep_token_id = 2 , pad_token_id = 1 , bos_token_id = 0 , eos_token_id = 2 , vocab_size = 3_0522 , hidden_size = 768 , num_hidden_layers = 12 , num_attention_heads = 12 , intermediate_size = 3072 , hidden_act = "gelu" , hidden_dropout_prob = 0.1 , attention_probs_dropout_prob = 0.1 , max_position_embeddings = 512 , type_vocab_size = 2 , initializer_range = 0.02 , layer_norm_eps = 1e-12 , onnx_export = False , **kwargs , ) -> str:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class _lowerCAmelCase ( OnnxConfig ):
    def __init__( self , config , task = "default" , patching_specs = None ) -> List[str]:
        '''simple docstring'''
        super().__init__(config , task , patching_specs )
        config.onnx_export = True
@property
def lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
snake_case : str = {0: "batch", 1: "choice", 2: "sequence"}
else:
snake_case : List[str] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
def lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs
@property
def lowerCamelCase ( self ) -> float:
'''simple docstring'''
return 1e-4
@property
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
return max(super().default_onnx_opset , 14 )
    def lowerCamelCase ( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        '''simple docstring'''
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        import torch
        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"] )
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
        return inputs
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption ( parser : Tuple ) -> List[str]:
    from diffusers.utils.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary ( terminalreporter : Tuple ) -> int:
    from diffusers.utils.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
"""simple docstring"""
from timeit import timeit
test_data = {
"""MALAYALAM""": True,
"""String""": False,
"""rotor""": True,
"""level""": True,
"""A""": True,
"""BB""": True,
"""ABC""": False,
"""amanaplanacanalpanama""": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome ( s : str ) -> bool:
    start_i = 0
    end_i = len(s ) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True
def is_palindrome_traversal ( s : str ) -> bool:
    end = len(s ) // 2
    n = len(s )
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end ) )
def is_palindrome_recursive ( s : str ) -> bool:
    if len(s ) <= 2:
        return True
    if s[0] == s[len(s ) - 1]:
        return is_palindrome_recursive(s[1:-1] )
    else:
        return False
def is_palindrome_slice ( s : str ) -> bool:
    return s == s[::-1]
def benchmark_function ( name : str ) -> None:
    stmt = F'''all({name}(key) is value for key, value in test_data.items())'''
    setup = F'''from __main__ import test_data, {name}'''
    number = 500000
    result = timeit(stmt=stmt , setup=setup , number=number )
    print(F'''{name:<35} finished {number:,} runs in {result:.5f} seconds''' )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F'''{key:21} {value}''')
print("""a man a plan a canal panama""")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("""is_palindrome_slice""")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("""is_palindrome""")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("""is_palindrome_recursive""")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("""is_palindrome_traversal""")
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
@property
    def dummy_uncond_unet ( self : Any ) -> str:
torch.manual_seed(0 )
        model = UNetaDModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('DownBlock2D', 'AttnDownBlock2D'), up_block_types=('AttnUpBlock2D', 'UpBlock2D'), )
return model
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler )
        sde_ve.to(torch_device )
        sde_ve.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = sde_ve(num_inference_steps=2, output_type='numpy', generator=generator ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = sde_ve(num_inference_steps=2, output_type='numpy', generator=generator, return_dict=False )[
            0
        ]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
        model_id = 'google/ncsnpp-church-256'
        model = UNetaDModel.from_pretrained(model_id )
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id )
        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler )
        sde_ve.to(torch_device )
        sde_ve.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = sde_ve(num_inference_steps=10, output_type='numpy', generator=generator ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 2_56, 2_56, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
'''simple docstring'''
import math
def sieve ( n : int ):
    prime = []
    start = 2
    end = int(math.sqrt(n ) )  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start )
            for i in range(start * start , end + 1 , start ):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end , n )
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each ) * each
            if t < low:
                t += each
            for j in range(t , high + 1 , each ):
                temp[j - low] = False
        for j in range(len(temp ) ):
            if temp[j] is True:
                prime.append(j + low )
        low = high + 1
        high = min(high + end , n )
    return prime
print(sieve(10**6))
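# Spot check (added for illustration; it exercises the segmented part at low == 6):
assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]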
'''simple docstring'''
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class _a ( unittest.TestCase ):
def A ( self : int ):
'''simple docstring'''
        act = get_activation('''swish''' )
        self.assertIsInstance(act , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def A ( self : str ):
'''simple docstring'''
        act = get_activation('''silu''' )
        self.assertIsInstance(act , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def A ( self : Optional[int] ):
'''simple docstring'''
        act = get_activation('''mish''' )
        self.assertIsInstance(act , nn.Mish )
self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def A ( self : List[Any] ):
'''simple docstring'''
        act = get_activation('''gelu''' )
        self.assertIsInstance(act , nn.GELU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
'''simple docstring'''
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
_DESCRIPTION = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
_KWARGS_DESCRIPTION = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n'
CHECKPOINT_URLS = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
def A ( self : Dict ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )
def A ( self : List[Any] , lowercase : Union[str, Any] ):
'''simple docstring'''
if self.config_name == "default":
logger.warning(
'''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
'''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''' )
            checkpoint_name = '''bleurt-base-128'''
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
else:
raise KeyError(
f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}" )
# download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        self.scorer = score.BleurtScorer(os.path.join(model_path , checkpoint_name ) )
    def A ( self : str , predictions : str , references : str ):
        '''simple docstring'''
        scores = self.scorer.score(references=references , candidates=predictions )
return {"scores": scores}
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __snake_case ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase):
snake_case__ : Optional[Any] = StableDiffusionXLImgaImgPipeline
snake_case__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
snake_case__ : int = PipelineTesterMixin.required_optional_params - {"latents"}
snake_case__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case__ : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
snake_case__ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCamelCase : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=__lowerCAmelCase , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , )
_lowerCamelCase : Dict = EulerDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
_lowerCamelCase : str = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
_lowerCamelCase : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=3_2 , )
_lowerCamelCase : Dict = CLIPTextModel(__lowerCAmelCase )
_lowerCamelCase : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__lowerCAmelCase )
_lowerCamelCase : List[str] = CLIPTextModelWithProjection(__lowerCAmelCase )
_lowerCamelCase : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__lowerCAmelCase )
_lowerCamelCase : List[Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any=0 ):
"""simple docstring"""
_lowerCamelCase : Dict = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
_lowerCamelCase : str = image / 2 + 0.5
if str(__lowerCAmelCase ).startswith('''mps''' ):
_lowerCamelCase : Any = torch.manual_seed(__lowerCAmelCase )
else:
_lowerCamelCase : str = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.75,
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase : int = self.get_dummy_components()
_lowerCamelCase : Any = StableDiffusionXLImgaImgPipeline(**__lowerCAmelCase )
_lowerCamelCase : List[Any] = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = self.get_dummy_inputs(__lowerCAmelCase )
_lowerCamelCase : List[Any] = sd_pipe(**__lowerCAmelCase ).images
_lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_lowerCamelCase : Any = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Dict = self.get_dummy_components()
_lowerCamelCase : Dict = StableDiffusionXLImgaImgPipeline(**__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = sd_pipe.to(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
# forward without prompt embeds
_lowerCamelCase : List[Any] = self.get_dummy_inputs(__lowerCAmelCase )
_lowerCamelCase : str = 3 * ['''this is a negative prompt''']
_lowerCamelCase : Dict = negative_prompt
_lowerCamelCase : int = 3 * [inputs['''prompt''']]
_lowerCamelCase : Dict = sd_pipe(**__lowerCAmelCase )
_lowerCamelCase : Tuple = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
_lowerCamelCase : List[str] = self.get_dummy_inputs(__lowerCAmelCase )
_lowerCamelCase : str = 3 * ['''this is a negative prompt''']
_lowerCamelCase : int = 3 * [inputs.pop('''prompt''' )]
(
(
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) ,
) : Any = sd_pipe.encode_prompt(__lowerCAmelCase , negative_prompt=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = sd_pipe(
**__lowerCAmelCase , prompt_embeds=__lowerCAmelCase , negative_prompt_embeds=__lowerCAmelCase , pooled_prompt_embeds=__lowerCAmelCase , negative_pooled_prompt_embeds=__lowerCAmelCase , )
_lowerCamelCase : str = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple="cpu" , __lowerCAmelCase : Tuple=torch.floataa , __lowerCAmelCase : Dict=0 ):
"""simple docstring"""
_lowerCamelCase : Tuple = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = np.random.RandomState(__lowerCAmelCase ).standard_normal((1, 4, 6_4, 6_4) )
_lowerCamelCase : List[Any] = torch.from_numpy(__lowerCAmelCase ).to(device=__lowerCAmelCase , dtype=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : List[str] = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : List[Any] = self.get_inputs(__lowerCAmelCase )
_lowerCamelCase : int = pipe(**__lowerCAmelCase ).images
_lowerCamelCase : Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase : Optional[int] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
"""simple docstring"""
from __future__ import annotations
def snake_case_ ( matrix : list[list[int]] ):
    '''simple docstring'''
    for i in range(1, len(matrix[0] ) ):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix ) ):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix ) ):
        for j in range(1, len(matrix[0] ) ):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1] )
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
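# Worked example (added for illustration): in [[2, 1], [3, 1]] the cheapest
# monotone path is 2 -> 1 -> 1, so the minimum path cost is 4.
assert snake_case_([[2, 1], [3, 1]]) == 4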
import argparse
import struct
import unittest
class SHAaaa :
def __init__( self , __snake_case ) -> None:
"""simple docstring"""
        self.data = data
# Initialize hash values
        self.hashes = [
0x6A09_E667,
0xBB67_AE85,
0x3C6E_F372,
0xA54F_F53A,
0x510E_527F,
0x9B05_688C,
0x1F83_D9AB,
0x5BE0_CD19,
]
# Initialize round constants
        self.round_constants = [
0x428A_2F98,
0x7137_4491,
0xB5C0_FBCF,
0xE9B5_DBA5,
0x3956_C25B,
0x59F1_11F1,
0x923F_82A4,
0xAB1C_5ED5,
0xD807_AA98,
0x1283_5B01,
0x2431_85BE,
0x550C_7DC3,
0x72BE_5D74,
0x80DE_B1FE,
0x9BDC_06A7,
0xC19B_F174,
0xE49B_69C1,
0xEFBE_4786,
0x0FC1_9DC6,
0x240C_A1CC,
0x2DE9_2C6F,
0x4A74_84AA,
0x5CB0_A9DC,
0x76F9_88DA,
0x983E_5152,
0xA831_C66D,
0xB003_27C8,
0xBF59_7FC7,
0xC6E0_0BF3,
0xD5A7_9147,
0x06CA_6351,
0x1429_2967,
0x27B7_0A85,
0x2E1B_2138,
0x4D2C_6DFC,
0x5338_0D13,
0x650A_7354,
0x766A_0ABB,
0x81C2_C92E,
0x9272_2C85,
0xA2BF_E8A1,
0xA81A_664B,
0xC24B_8B70,
0xC76C_51A3,
0xD192_E819,
0xD699_0624,
0xF40E_3585,
0x106A_A070,
0x19A4_C116,
0x1E37_6C08,
0x2748_774C,
0x34B0_BCB5,
0x391C_0CB3,
0x4ED8_AA4A,
0x5B9C_CA4F,
0x682E_6FF3,
0x748F_82EE,
0x78A5_636F,
0x84C8_7814,
0x8CC7_0208,
0x90BE_FFFA,
0xA450_6CEB,
0xBEF9_A3F7,
0xC671_78F2,
]
        self.preprocessed_data = self.preprocessing(self.data )
self.final_hash()
    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        # append a single 1 bit, pad with zeros, then append the 64-bit
        # big-endian message length so the total is a multiple of 64 bytes
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
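
    # For example, preprocessing(b"") returns 64 bytes: 0x80, fifty-five zero
    # bytes, then the eight-byte big-endian bit length (zero). The output
    # length is always a multiple of the 64-byte block size.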
    def final_hash(self) -> None:
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_0000_0000
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_0000_0000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_0000_0000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_0000_0000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_0000_0000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_0000_0000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value: int, rotations: int) -> int:
        # right-rotate a 32-bit value
        return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    """
    Provides the option to hash a string or the contents of a file and prints
    the calculated SHA-256 digest.
    """
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()

    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
| 721 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires it
    def forward(self):
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
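
# Illustrative invocation (paths below are placeholders, not shipped with this
# script):
#     python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#         --longformer_model longformer-base-4096 \
#         --longformer_question_answering_ckpt_path ./qa_checkpoint.ckpt \
#         --pytorch_dump_folder_path ./converted_model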
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--longformer_model',
default=None,
type=str,
required=True,
help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
)
parser.add_argument(
'--longformer_question_answering_ckpt_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch Lightning Checkpoint.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 166 | 0 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
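
# These integration tests run data-parallel over all local devices: `replicate`
# copies the pipeline parameters to every device, `shard` splits the batched
# inputs along the device axis, and `jit=True` lets the pipeline pmap the
# denoising loop.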
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up device memory after each test
        super().tearDown()
        gc.collect()
    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 101 |
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """
    Solves the system a1*x + b1*y = d1 and a2*x + b2*y = d2 with Cramer's
    rule, where each equation is given as [a, b, d].
    """
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
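
# Worked example (values chosen for illustration): for the system
#     2x + y = 7
#      x - y = 2
# determinant = -3, determinant_x = -9 and determinant_y = -3, so
# cramers_rule_2x2([2, 1, 7], [1, -1, 2]) returns (3.0, 1.0).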
| 461 | 0 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    """
    Object detection pipeline using any `AutoModelForObjectDetection`. This pipeline predicts bounding boxes of
    objects and their classes.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )
    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs
    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        """
        Detect objects (bounding boxes and classes) in the image(s) passed as inputs.
        """
        return super().__call__(*args, **kwargs)
    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """
        Turns a tensor [xmin, ymin, xmax, ymax] into a dict {"xmin": xmin, ...}.
        """
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
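
# Illustrative usage (the checkpoint name is an example, not prescribed by this
# module):
#     from transformers import pipeline
#     detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#     detector("http://images.cocodataset.org/val2017/000000039769.jpg")
#     # -> [{"score": 0.99, "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]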
| 322 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    r"""
    Construct a "fast" DPRContextEncoder tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    r"""
    Construct a "fast" DPRQuestionEncoder tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """
        Get the span predictions for the extractive Q&A model, ranked over the retrieved passages.
        """
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        """
        Finds the best answer spans for the extractive Q&A model for one passage.
        """
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    r"""
    Construct a "fast" DPRReader tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
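
# Illustrative usage (checkpoint name is an example): the reader tokenizer
# packs one question against several (title, text) passages, e.g.
#     tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded = tokenizer(questions="What is love?", titles=["Haddaway"], texts=["What Is Love is a song..."])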
| 322 | 1 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
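
# The tester below builds a tiny randomly initialized BioGPT configuration
# (hidden_size=32, 5 layers) so shape, caching and gradient behaviour can be
# checked quickly; the integration tests at the bottom exercise the real
# "microsoft/biogpt" checkpoint and are gated behind @slow.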
class BioGptModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_biogpt_model_attention_mask_past(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_biogpt_model_past_large_inputs(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config).to(torch_device).eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_forward_and_backwards(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False
    ):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()
    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)
    def create_and_check_biogpt_for_token_classification(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_biogpt(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        expected_output_str = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, expected_output_str)
| 476 |
"""
Project Euler problem 145: https://projecteuler.net/problem=145

A number is "reversible" if the sum of the number and its digit reversal
consists entirely of odd digits; leading zeroes are not allowed on either
end. Count the reversible numbers below 10**max_power.
"""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """
    Count the reversible numbers of the given length that can be built from
    the partially fixed `digits`.
    """
    if remaining_length == 0:
        # leading zeroes are not allowed on either end
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(f'{solution() = }')
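    # For max_power = 9 this counts the reversible numbers below one billion;
    # the published Project Euler answer is 608720.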
| 527 | 0 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 48_000,
"sample_size": 65_536,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 48_000,
"sample_size": 65_536,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 48_000,
"sample_size": 131_072,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 16_000,
"sample_size": 65_536,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 16_000,
"sample_size": 65_536,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 16_000,
"sample_size": 65_536,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Returns a timestep, given the scaling factors for the clean image and for the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
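
# `get_crash_schedule` maps a linear grid of t in (0, 1] onto the "crash"
# noise schedule used by the original audio-diffusion training code: each t is
# turned into an (alpha, sigma) pair and converted back to a timestep by
# `alpha_sigma_to_t` above.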
class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
UP_NUM_TO_LAYER = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
MID_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
RES_CONV_MAP = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
ATTN_MAP = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()

        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)

    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)

    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument(
        # argparse's `type=bool` would treat any non-empty string (including "False") as True,
        # so parse the flag value explicitly instead
        "--save",
        default=True,
        type=lambda s: str(s).lower() not in {"0", "false", "no"},
        required=False,
        help="Whether to save the converted model or not.",
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()
    main(args)
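# Example invocation (illustrative names/paths only; the valid model names are the keys of
# MODELS_MAP defined earlier in this script):
#   python convert_dance_diffusion_to_diffusers.py --model_path maestro-150k --checkpoint_path ./maestro-150k-diffusers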
| 707 |
def bfs(graph, source, sink, parent):
    """Breadth-first search for an augmenting path from source to sink; records it in `parent`."""
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink):
    """Edmonds-Karp variant of Ford-Fulkerson: augment along shortest (BFS) paths."""
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update the residual capacities along the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
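# For this classic CLRS flow network the printed maximum flow is 23.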
| 567 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
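# Minimal usage sketch (hypothetical image path; assumes the `vision` extras are installed):
#   from PIL import Image
#   tool = ImageQuestionAnsweringTool()
#   print(tool(Image.open("photo.png"), "How many cats are in the picture?"))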
| 600 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
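# Minimal usage sketch (illustrative values only):
#   config = TableTransformerConfig(num_queries=50, d_model=256)
#   onnx_config = TableTransformerOnnxConfig(config)
#   print(config.num_attention_heads, onnx_config.default_onnx_opset)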
| 600 | 1 |
import numpy as np
class IndexCalculation:
    """Compute vegetation indices (NDVI, EVI, SAVI, ...) from per-band numpy arrays.

    Example (illustrative values):
        index = IndexCalculation(red=np.array([[20.0]]), nir=np.array([[100.0]]))
        index.calculation("NDVI")
    """

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matrices(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matrices(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matrices(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arv2,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False
    def arv2(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 719 |
def lucas_lehmer_test(p: int) -> bool:
    """Lucas-Lehmer primality test for the Mersenne number 2**p - 1 (p an odd prime, or 2)."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))  # True: 2**7 - 1 = 127 is prime
    print(lucas_lehmer_test(11))  # False: 2**11 - 1 = 2047 = 23 * 89
| 403 | 0 |
"""DeeBERT: BERT with entropy-based early exits ("highways") after each transformer layer."""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Entropy of the softmax distribution over a pre-softmax logit tensor:
    log(sum(exp(x))) - sum(x * exp(x)) / sum(exp(x))."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
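# Early-exit intuition: low softmax entropy at an intermediate "highway" head means that
# layer is already confident, so inference can stop there instead of running all layers
# (see the BertHighway modules used by the encoder below).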
class _UpperCamelCase ( nn.Module):
'''simple docstring'''
def __init__( self , a_ ) -> Tuple:
super().__init__()
lowercase : Dict = config.output_attentions
lowercase : List[Any] = config.output_hidden_states
lowercase : Any = nn.ModuleList([BertLayer(a_ ) for _ in range(config.num_hidden_layers )] )
lowercase : Union[str, Any] = nn.ModuleList([BertHighway(a_ ) for _ in range(config.num_hidden_layers )] )
lowercase : Union[str, Any] = [-1 for _ in range(config.num_hidden_layers )]
def a__ ( self , a_ ) -> Tuple:
if (type(a_ ) is float) or (type(a_ ) is int):
for i in range(len(self.early_exit_entropy ) ):
lowercase : List[str] = x
else:
lowercase : List[Any] = x
def a__ ( self , a_ ) -> str:
lowercase : str = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def a__ ( self , a_ , a_=None , a_=None , a_=None , a_=None , ) -> List[Any]:
lowercase : Optional[int] = ()
lowercase : Union[str, Any] = ()
lowercase : Tuple = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
lowercase : List[str] = all_hidden_states + (hidden_states,)
lowercase : List[Any] = layer_module(
a_ , a_ , head_mask[i] , a_ , a_ )
lowercase : List[Any] = layer_outputs[0]
if self.output_attentions:
lowercase : Any = all_attentions + (layer_outputs[1],)
lowercase : Tuple = (hidden_states,)
if self.output_hidden_states:
lowercase : Any = current_outputs + (all_hidden_states,)
if self.output_attentions:
lowercase : Optional[Any] = current_outputs + (all_attentions,)
lowercase : int = self.highway[i](a_ )
# logits, pooled_output
if not self.training:
lowercase : str = highway_exit[0]
lowercase : List[str] = entropy(a_ )
lowercase : List[str] = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
lowercase : List[str] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
lowercase : List[Any] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(a_ , i + 1 )
else:
lowercase : Optional[int] = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
lowercase : Optional[int] = all_hidden_states + (hidden_states,)
lowercase : Any = (hidden_states,)
if self.output_hidden_states:
lowercase : Optional[Any] = outputs + (all_hidden_states,)
if self.output_attentions:
lowercase : Dict = outputs + (all_attentions,)
lowercase : Union[str, Any] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
'''The Bert Model transformer with early exiting (DeeBERT). ''' , SCREAMING_SNAKE_CASE , )
class _UpperCamelCase ( SCREAMING_SNAKE_CASE):
'''simple docstring'''
def __init__( self , a_ ) -> List[str]:
super().__init__(a_ )
lowercase : List[Any] = config
lowercase : Tuple = BertEmbeddings(a_ )
lowercase : Optional[Any] = DeeBertEncoder(a_ )
lowercase : int = BertPooler(a_ )
self.init_weights()
def a__ ( self ) -> int:
self.encoder.init_highway_pooler(self.pooler )
def a__ ( self ) -> Dict:
return self.embeddings.word_embeddings
def a__ ( self , a_ ) -> Dict:
lowercase : Optional[Any] = value
def a__ ( self , a_ ) -> Tuple:
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(a_ )
@add_start_docstrings_to_model_forward(a_ )
def a__ ( self , a_=None , a_=None , a_=None , a_=None , a_=None , a_=None , a_=None , a_=None , ) -> List[Any]:
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
lowercase : List[str] = input_ids.size()
elif inputs_embeds is not None:
lowercase : str = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
lowercase : str = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowercase : Any = torch.ones(a_ , device=a_ )
if encoder_attention_mask is None:
lowercase : Any = torch.ones(a_ , device=a_ )
if token_type_ids is None:
lowercase : Optional[Any] = torch.zeros(a_ , dtype=torch.long , device=a_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowercase : torch.Tensor = self.get_extended_attention_mask(a_ , a_ , a_ )
        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
lowercase : Any = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
lowercase : Tuple = encoder_attention_mask[:, None, None, :]
lowercase : Optional[Any] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
        lowercase : List[str] = (1.0 - encoder_extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowercase : int = self.get_head_mask(a_ , self.config.num_hidden_layers )
lowercase : Union[str, Any] = self.embeddings(
input_ids=a_ , position_ids=a_ , token_type_ids=a_ , inputs_embeds=a_ )
lowercase : Optional[Any] = self.encoder(
a_ , attention_mask=a_ , head_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , )
lowercase : Optional[Any] = encoder_outputs[0]
lowercase : List[Any] = self.pooler(a_ )
lowercase : List[Any] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class _UpperCamelCase ( SCREAMING_SNAKE_CASE):
'''simple docstring'''
def __init__( self , a_ , a_ ) -> Union[str, Any]:
lowercase : Union[str, Any] = message
lowercase : List[Any] = exit_layer # start from 1!
class _UpperCamelCase ( nn.Module):
'''simple docstring'''
def __init__( self , a_ ) -> int:
super().__init__()
lowercase : str = BertPooler(a_ )
lowercase : Dict = nn.Dropout(config.hidden_dropout_prob )
lowercase : Optional[Any] = nn.Linear(config.hidden_size , config.num_labels )
def a__ ( self , a_ ) -> Union[str, Any]:
# Pooler
lowercase : Union[str, Any] = encoder_outputs[0]
lowercase : Tuple = self.pooler(a_ )
# "return" pooler_output
# BertModel
lowercase : Tuple = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
lowercase : List[str] = bmodel_output[1]
lowercase : Union[str, Any] = self.dropout(a_ )
lowercase : Any = self.classifier(a_ )
return logits, pooled_output
@add_start_docstrings(
'''Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. ''' , SCREAMING_SNAKE_CASE , )
class _UpperCamelCase ( SCREAMING_SNAKE_CASE):
'''simple docstring'''
def __init__( self , a_ ) -> Dict:
super().__init__(a_ )
lowercase : Dict = config.num_labels
lowercase : Dict = config.num_hidden_layers
lowercase : Any = DeeBertModel(a_ )
lowercase : List[Any] = nn.Dropout(config.hidden_dropout_prob )
lowercase : Optional[Any] = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(a_ )
def a__ ( self , a_=None , a_=None , a_=None , a_=None , a_=None , a_=None , a_=None , a_=-1 , a_=False , ) -> Optional[Any]:
lowercase : Optional[Any] = self.num_layers
try:
lowercase : List[Any] = self.bert(
a_ , attention_mask=a_ , token_type_ids=a_ , position_ids=a_ , head_mask=a_ , inputs_embeds=a_ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
lowercase : Any = outputs[1]
lowercase : Union[str, Any] = self.dropout(a_ )
lowercase : List[Any] = self.classifier(a_ )
lowercase : Tuple = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
lowercase : Tuple = e.message
lowercase : Dict = e.exit_layer
lowercase : List[Any] = outputs[0]
if not self.training:
lowercase : Any = entropy(a_ )
lowercase : Tuple = []
lowercase : Dict = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
lowercase : str = MSELoss()
lowercase : List[str] = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
lowercase : int = CrossEntropyLoss()
lowercase : List[str] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
lowercase : Dict = []
for highway_exit in outputs[-1]:
lowercase : str = highway_exit[0]
if not self.training:
highway_logits_all.append(a_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
lowercase : int = MSELoss()
lowercase : int = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
lowercase : Optional[int] = CrossEntropyLoss()
lowercase : Optional[int] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(a_ )
if train_highway:
lowercase : Any = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
lowercase : Union[str, Any] = (loss,) + outputs
if not self.training:
lowercase : Dict = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
lowercase : str = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
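# Minimal early-exit usage sketch (hypothetical tensors; in the original DeeBERT example this
# classifier is DeeBertForSequenceClassification and the threshold setter is named
# set_early_exit_entropy):
#   model.bert.encoder.set_early_exit_entropy(0.3)
#   outputs = model(input_ids, attention_mask=attention_mask)  # may exit before the last layer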
| 372 |
"""Tests for the SwiftFormer model."""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCamelCase :
'''simple docstring'''
def __init__( self , a_ , a_=1_3 , a_=3 , a_=True , a_=True , a_=0.1 , a_=0.1 , a_=2_2_4 , a_=1_0_0_0 , a_=[3, 3, 6, 4] , a_=[4_8, 5_6, 1_1_2, 2_2_0] , ) -> Union[str, Any]:
lowercase : Optional[Any] = parent
lowercase : List[Any] = batch_size
lowercase : Union[str, Any] = num_channels
lowercase : str = is_training
lowercase : Any = use_labels
lowercase : Optional[Any] = hidden_dropout_prob
lowercase : List[str] = attention_probs_dropout_prob
lowercase : Optional[Any] = num_labels
lowercase : str = image_size
lowercase : Dict = layer_depths
lowercase : List[str] = embed_dims
def a__ ( self ) -> Optional[Any]:
lowercase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase : int = None
if self.use_labels:
lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
lowercase : List[Any] = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> List[Any]:
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=a_ , layer_scale_init_value=1e-5 , )
def a__ ( self , a_ , a_ , a_ ) -> Optional[Any]:
lowercase : Tuple = SwiftFormerModel(config=a_ )
model.to(a_ )
model.eval()
lowercase : Union[str, Any] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def a__ ( self , a_ , a_ , a_ ) -> Any:
lowercase : Dict = self.num_labels
lowercase : Optional[int] = SwiftFormerForImageClassification(a_ )
model.to(a_ )
model.eval()
lowercase : Tuple = model(a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
lowercase : Tuple = SwiftFormerForImageClassification(a_ )
model.to(a_ )
model.eval()
lowercase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase : str = model(a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self ) -> Dict:
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase):
'''simple docstring'''
_snake_case = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
_snake_case = (
{'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
def a__ ( self ) -> str:
lowercase : Optional[int] = SwiftFormerModelTester(self )
lowercase : int = ConfigTester(
self , config_class=a_ , has_text_modality=a_ , hidden_size=3_7 , num_attention_heads=1_2 , num_hidden_layers=1_2 , )
def a__ ( self ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def a__ ( self ) -> int:
pass
def a__ ( self ) -> Optional[int]:
lowercase , lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Optional[int] = model_class(a_ )
lowercase : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_ , nn.Linear ) )
def a__ ( self ) -> List[Any]:
lowercase , lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : str = model_class(a_ )
lowercase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase : Union[str, Any] = [*signature.parameters.keys()]
lowercase : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a_ )
def a__ ( self ) -> int:
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def a__ ( self ) -> Optional[int]:
lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
@slow
def a__ ( self ) -> Dict:
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Optional[int] = SwiftFormerModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def a__ ( self ) -> Optional[Any]:
pass
def a__ ( self ) -> str:
def check_hidden_states_output(a_ , a_ , a_ ):
lowercase : Union[str, Any] = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
lowercase : List[str] = model(**self._prepare_for_class(a_ , a_ ) )
lowercase : str = outputs.hidden_states
lowercase : str = 8
self.assertEqual(len(a_ ) , a_ ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(a_ ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
lowercase , lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Dict = True
check_hidden_states_output(a_ , a_ , a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase : str = True
check_hidden_states_output(a_ , a_ , a_ )
def a__ ( self ) -> Optional[Any]:
def _config_zero_init(a_ ):
lowercase : str = copy.deepcopy(a_ )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(a_ , a_ , 1e-1_0 )
if isinstance(getattr(a_ , a_ , a_ ) , a_ ):
lowercase : List[Any] = _config_zero_init(getattr(a_ , a_ ) )
setattr(a_ , a_ , a_ )
return configs_no_init
lowercase , lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase : List[str] = _config_zero_init(a_ )
for model_class in self.all_model_classes:
lowercase : Tuple = model_class(config=a_ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def a__ ( self ) -> Tuple:
pass
def _A ( ) -> List[str]:
lowercase : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _UpperCamelCase ( unittest.TestCase):
'''simple docstring'''
@cached_property
def a__ ( self ) -> List[Any]:
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
def a__ ( self ) -> List[str]:
lowercase : Tuple = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(a_ )
lowercase : Any = self.default_image_processor
lowercase : Optional[int] = prepare_img()
lowercase : Any = image_processor(images=a_ , return_tensors="pt" ).to(a_ )
# forward pass
with torch.no_grad():
lowercase : List[str] = model(**a_ )
# verify the logits
lowercase : str = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , a_ )
lowercase : List[Any] = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a_ , atol=1e-4 ) )
| 372 | 1 |
"""GPT-J model configuration and ONNX export configuration."""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
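# Minimal usage sketch (illustrative sizes):
#   config = GPTJConfig(n_layer=4, n_head=4, n_embd=256)
#   onnx_config = GPTJOnnxConfig(config)
#   print(onnx_config.num_layers, onnx_config.num_attention_heads)  # 4 4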
| 706 |
"""Divide-and-conquer maximum subarray, with an empirical runtime comparison plot."""
from __future__ import annotations

import time
from collections.abc import Sequence
from random import randint

from matplotlib import pyplot as plt


def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
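# The recurrence for max_subarray is T(n) = 2 * T(n / 2) + O(n), i.e. O(n log n),
# which the timing loop in plot_runtimes() makes visible empirically.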
| 56 | 0 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
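# Project Euler problem 30: the qualifying numbers are 4150, 4151, 54748, 92727, 93084
# and 194979, so solution() returns 443839.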
| 17 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class lowerCAmelCase_ ( snake_case__ ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any]=13 , SCREAMING_SNAKE_CASE_ : Dict=7 , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : Optional[int]=False , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : str=99 , SCREAMING_SNAKE_CASE_ : str=32 , SCREAMING_SNAKE_CASE_ : int=5 , SCREAMING_SNAKE_CASE_ : Tuple=4 , SCREAMING_SNAKE_CASE_ : Tuple=37 , SCREAMING_SNAKE_CASE_ : Tuple="gelu" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : Dict=512 , SCREAMING_SNAKE_CASE_ : Any=16 , SCREAMING_SNAKE_CASE_ : List[Any]=2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.02 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE_ : Optional[Any]=4 , SCREAMING_SNAKE_CASE_ : int=None , ):
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_input_mask
lowerCAmelCase__ = use_token_type_ids
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = num_choices
lowerCAmelCase__ = scope
def __snake_case ( self : Union[str, Any] ):
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __snake_case ( self : Tuple ):
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
lowerCAmelCase__ = DistilBertModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case ( self : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
lowerCAmelCase__ = DistilBertForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple ):
lowerCAmelCase__ = DistilBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase__ = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int ):
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = DistilBertForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] ):
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = DistilBertForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
lowerCAmelCase__ = self.num_choices
lowerCAmelCase__ = DistilBertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __snake_case ( self : Optional[int] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( snake_case__ , snake_case__ , unittest.TestCase ):
UpperCamelCase_ :Any = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
UpperCamelCase_ :Union[str, Any] = (
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase_ :int = True
UpperCamelCase_ :List[str] = True
UpperCamelCase_ :List[Any] = True
UpperCamelCase_ :Dict = True
def __snake_case ( self : Dict ):
lowerCAmelCase__ = DistilBertModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , dim=37 )
def __snake_case ( self : List[Any] ):
self.config_tester.run_common_tests()
def __snake_case ( self : Dict ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Optional[Any] ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Dict ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Union[str, Any] ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : int ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Optional[Any] ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )
@slow
def __snake_case ( self : Tuple ):
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = DistilBertModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@slow
@require_torch_gpu
def __snake_case ( self : Any ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = torch.jit.trace(
SCREAMING_SNAKE_CASE_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , '''traced_model.pt''' ) )
lowerCAmelCase__ = torch.jit.load(os.path.join(SCREAMING_SNAKE_CASE_ , '''traced_model.pt''' ) , map_location=SCREAMING_SNAKE_CASE_ )
loaded(inputs_dict['''input_ids'''].to(SCREAMING_SNAKE_CASE_ ) , inputs_dict['''attention_mask'''].to(SCREAMING_SNAKE_CASE_ ) )
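        # The trace -> save -> reload -> call round-trip above is the standard TorchScript
        # compatibility check: it fails loudly if tracing or serialization changes behavior.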
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __snake_case ( self : str ):
lowerCAmelCase__ = DistilBertModel.from_pretrained('''distilbert-base-uncased''' )
lowerCAmelCase__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
lowerCAmelCase__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0]
lowerCAmelCase__ = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 668 | 0 |
"""Streamlit demo: long-form question answering on ELI5 with dense/sparse retrieval and BART."""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer

MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        # Dense retrieval: RetriBERT question encoder
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        s2s_model = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True )
def load_train_data():
    '''Load the ELI5 training set and index its question embeddings with FAISS.'''
    eli5 = datasets.load_dataset('eli5' , name='LFQA_reddit' )
    eli5_train = eli5['train_eli5']
    eli5_train_q_reps = np.memmap(
        'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(eli5_train.num_rows, 128) )
    eli5_train_q_index = faiss.IndexFlatIP(128 )
    eli5_train_q_index.add(eli5_train_q_reps )
    return (eli5_train, eli5_train_q_index)
wiki40b_passages , wiki40b_gpu_index_flat , es_client = load_indexes()
qar_tokenizer , qar_model , s2s_tokenizer , s2s_model = load_models()
eli5_train , eli5_train_q_index = load_train_data()
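# Retrieval helpers: embed the question, then fetch the nearest ELI5 questions or supporting passages.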
def find_nearest_training( question , n_results=10 ):
    '''Return the training questions closest to `question` in embedding space.'''
    q_rep = embed_questions_for_retrieval([question] , qar_tokenizer , qar_model )
    scores , indices = eli5_train_q_index.search(q_rep , n_results )
    nn_examples = [eli5_train[int(i )] for i in indices[0]]
    return nn_examples
def __lowerCAmelCase ( a_ , a_="wiki40b" , a_="dense" , a_=10 ) -> Optional[int]:
'''simple docstring'''
if source == "none":
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = (' <P> '.join(['' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = query_qa_dense_index(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = query_es_index(
__UpperCAmelCase , __UpperCAmelCase , index_name='english_wiki40b_snippets_100w' , n_results=__UpperCAmelCase , )
SCREAMING_SNAKE_CASE : int = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
SCREAMING_SNAKE_CASE : Dict = 'question: {} context: {}'.format(__UpperCAmelCase , __UpperCAmelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda a_ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda a_ : None),
} )
def answer_question( question_doc , s2s_model , s2s_tokenizer , min_len=64 , max_len=256 , sampling=False , n_beams=2 , top_p=0.95 , temp=0.8 ):
    '''Generate an answer for a formatted question+context document.'''
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc , s2s_model , s2s_tokenizer , num_answers=1 , num_beams=n_beams , min_len=min_len , max_len=max_len , do_sample=sampling , temp=temp , top_p=top_p , top_k=None , max_input_length=1024 , device='cuda:0' , )[0]
    return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
_lowerCAmelCase :Optional[Any] = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
_lowerCAmelCase :List[str] = """\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_lowerCAmelCase :Optional[int] = """\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"""
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
_lowerCAmelCase :Tuple = st.sidebar.checkbox("""Demo options""")
if demo_options:
_lowerCAmelCase :str = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
_lowerCAmelCase :Dict = action_list.index(action_st)
_lowerCAmelCase :Optional[Any] = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
_lowerCAmelCase :Optional[int] = show_type == """Show full text of passages"""
else:
_lowerCAmelCase :Optional[int] = 3
_lowerCAmelCase :Optional[Any] = True
_lowerCAmelCase :List[str] = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
_lowerCAmelCase :int = """\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n """
st.sidebar.markdown(retriever_info)
_lowerCAmelCase :Dict = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
_lowerCAmelCase :Union[str, Any] = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
_lowerCAmelCase :Union[str, Any] = """wiki40b"""
_lowerCAmelCase :str = """dense"""
_lowerCAmelCase :List[str] = """beam"""
_lowerCAmelCase :Tuple = 2
_lowerCAmelCase :Optional[int] = 64
_lowerCAmelCase :List[Any] = 256
_lowerCAmelCase :List[str] = None
_lowerCAmelCase :str = None
_lowerCAmelCase :Optional[int] = st.sidebar.checkbox("""Generation options""")
if generate_options:
_lowerCAmelCase :List[str] = """\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n """
st.sidebar.markdown(generate_info)
_lowerCAmelCase :Dict = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
_lowerCAmelCase :List[str] = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
_lowerCAmelCase :Optional[Any] = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
_lowerCAmelCase :Optional[int] = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_lowerCAmelCase :List[str] = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_lowerCAmelCase :Any = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_lowerCAmelCase :str = None
# start main text
questions_list = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
question_s = st.selectbox(
    """What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("""Enter your question here:""", """""")
else:
    question = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
            _ , support_list_dense = make_support(question, source=wiki_source, method="""dense""", n_results=10)
            _ , support_list_sparse = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
        else:
            question_doc , support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer , support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == """sampled"""),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
_lowerCAmelCase :Optional[int] = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
_lowerCAmelCase :Tuple = res[1].strip()
if sec_titles == "":
_lowerCAmelCase :Any = """[{}]({})""".format(res[0], wiki_url)
else:
_lowerCAmelCase :str = sec_titles.split(""" & """)
_lowerCAmelCase :List[str] = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            """--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
        )
        answers_st = [
            """{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
            for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
            if i == 0 or sc > 2
        ]
        st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
_lowerCAmelCase :Union[str, Any] = """\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 713 |
'''RePaint: image inpainting pipeline built on a UNet2DModel and the RePaint scheduler.'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
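# The two helpers below mirror VaeImageProcessor.preprocess: resize, rescale, and batch images/masks.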
def _preprocess_image( image ):
    '''Resize to a multiple of 8, rescale to [-1, 1], and stack into a 4D tensor.'''
    warnings.warn(
        'The preprocess method is deprecated and will be removed in a future version. Please'
        ' use VaeImageProcessor.preprocess instead' , FutureWarning , )
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        w , h = image[0].size
        w , h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
def _preprocess_mask( mask ):
    '''Convert mask images to binary single-channel tensors sized to a multiple of 32.'''
    if isinstance(mask , torch.Tensor ):
        return mask
    elif isinstance(mask , PIL.Image.Image ):
        mask = [mask]
    if isinstance(mask[0] , PIL.Image.Image ):
        w , h = mask[0].size
        w , h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert('L' ).resize((w, h) , resample=PIL_INTERPOLATION['nearest'] ) )[None, :] for m in mask]
        mask = np.concatenate(mask , axis=0 )
        mask = mask.astype(np.float32 ) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask )
    elif isinstance(mask[0] , torch.Tensor ):
        mask = torch.cat(mask , dim=0 )
    return mask
class RePaintPipeline( DiffusionPipeline ):
    '''Inpainting pipeline: denoises the unmasked region while preserving the known pixels.'''
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__( self , unet , scheduler ):
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , image , mask_image , num_inference_steps = 250 , eta = 0.0 , jump_length = 10 , jump_n_sample = 10 , generator = None , output_type = "pil" , return_dict = True , ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image
        original_image = _preprocess_image(original_image )
        original_image = original_image.to(device=self.device , dtype=self.unet.dtype )
        mask_image = _preprocess_mask(mask_image )
        mask_image = mask_image.to(device=self.device , dtype=self.unet.dtype )

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                F"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )

        image_shape = original_image.shape
        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )

        # set step values
        self.scheduler.set_timesteps(num_inference_steps , jump_length , jump_n_sample , self.device )
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator , list ) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image , t ).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output , t , image , original_image , mask_image , generator ).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image , t_last , generator )
            t_last = t

        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image )
| 179 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
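# Import structure consumed by _LazyModule below; the torch-only modeling classes are registered lazily.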
_import_structure = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mmbt'''] = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 260 |
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    """Names of the supported learning-rate schedules."""

    LINEAR = """linear"""
    COSINE = """cosine"""
    COSINE_WITH_RESTARTS = """cosine_with_restarts"""
    POLYNOMIAL = """polynomial"""
    CONSTANT = """constant"""
    CONSTANT_WITH_WARMUP = """constant_with_warmup"""
    PIECEWISE_CONSTANT = """piecewise_constant"""
def get_constant_schedule(optimizer ,last_epoch = -1 ):
    """Constant learning rate: the multiplier is 1 at every step."""
    return LambdaLR(optimizer ,lambda _ : 1 ,last_epoch=last_epoch )


def get_constant_schedule_with_warmup(optimizer ,num_warmup_steps ,last_epoch = -1 ):
    """Constant learning rate preceded by a linear warmup phase."""
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1.0 ,num_warmup_steps ) )
        return 1.0

    return LambdaLR(optimizer ,lr_lambda ,last_epoch=last_epoch )


def get_piecewise_constant_schedule(optimizer ,step_rules ,last_epoch = -1 ):
    """Piecewise constant schedule defined by a rule string such as '1:10,0.1:20,0.01'."""
    rules_dict = {}
    rule_list = step_rules.split(',' )
    for rule_str in rule_list[:-1]:
        value_str , steps_str = rule_str.split(':' )
        steps = int(steps_str )
        value = float(value_str )
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1] )

    def create_rules_function(rules_dict ,last_lr_multiple ):
        def rule_func(steps ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict ,last_lr_multiple )
    return LambdaLR(optimizer ,rules_func ,last_epoch=last_epoch )
def get_linear_schedule_with_warmup(optimizer ,num_warmup_steps ,num_training_steps ,last_epoch=-1 ):
    """Linear warmup followed by a linear decay to zero."""
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 ,num_warmup_steps ) )
        return max(
            0.0 ,float(num_training_steps - current_step ) / float(max(1 ,num_training_steps - num_warmup_steps ) ) )

    return LambdaLR(optimizer ,lr_lambda ,last_epoch )


def get_cosine_schedule_with_warmup(optimizer ,num_warmup_steps ,num_training_steps ,num_cycles = 0.5 ,last_epoch = -1 ):
    """Linear warmup followed by a cosine decay to zero."""
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 ,num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 ,num_training_steps - num_warmup_steps ) )
        return max(0.0 ,0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )

    return LambdaLR(optimizer ,lr_lambda ,last_epoch )


def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer ,num_warmup_steps ,num_training_steps ,num_cycles = 1 ,last_epoch = -1 ):
    """Cosine decay with several hard restarts, after a linear warmup."""
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 ,num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 ,num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        return max(0.0 ,0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )

    return LambdaLR(optimizer ,lr_lambda ,last_epoch )
def get_polynomial_decay_schedule_with_warmup(optimizer ,num_warmup_steps ,num_training_steps ,lr_end=1E-7 ,power=1.0 ,last_epoch=-1 ):
    """Polynomial decay from the initial LR down to `lr_end`, after a linear warmup."""
    lr_init = optimizer.defaults['lr']
    if not (lr_init > lr_end):
        raise ValueError(f'''lr_end ({lr_end}) must be smaller than initial lr ({lr_init})''' )

    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 ,num_warmup_steps ) )
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer ,lr_lambda ,last_epoch )


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(name ,optimizer ,step_rules = None ,num_warmup_steps = None ,num_training_steps = None ,num_cycles = 1 ,power = 1.0 ,last_epoch = -1 ,):
    """Unified factory: look up the schedule by name and forward the required arguments."""
    name = SchedulerType(name )
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer ,last_epoch=last_epoch )

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer ,step_rules=step_rules ,last_epoch=last_epoch )

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f'''{name} requires `num_warmup_steps`, please provide that argument.''' )

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer ,num_warmup_steps=num_warmup_steps ,last_epoch=last_epoch )

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f'''{name} requires `num_training_steps`, please provide that argument.''' )

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer ,num_warmup_steps=num_warmup_steps ,num_training_steps=num_training_steps ,num_cycles=num_cycles ,last_epoch=last_epoch ,)

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer ,num_warmup_steps=num_warmup_steps ,num_training_steps=num_training_steps ,power=power ,last_epoch=last_epoch ,)

    return schedule_func(
        optimizer ,num_warmup_steps=num_warmup_steps ,num_training_steps=num_training_steps ,last_epoch=last_epoch )
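
# Example (hypothetical usage, assuming an existing model and torch optimizer):
#   from torch.optim import AdamW
#   optimizer = AdamW(model.parameters(), lr=5e-5)
#   scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=500, num_training_steps=10_000)
#   for _ in range(10_000):
#       optimizer.step()
#       scheduler.step()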
| 260 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class OwlViTFeatureExtractor( OwlViTImageProcessor ):
    """Deprecated alias of OwlViTImageProcessor, kept for backward compatibility."""

    def __init__( self , *args , **kwargs ):
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 714 |
'''Compute the Casimir force between two parallel plates.'''
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.0_5457_1817E-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3E8  # unit of c : m * s^-1
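# Casimir force between two parallel plates: F = (hbar * c * pi^2 * A) / (240 * d^4).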
def casimir_force(force: float , area: float , distance: float ) -> dict[str, float]:
    '''
    Solve F = (hbar * c * pi^2 * A) / (240 * d^4) for whichever of force, area, or
    distance is passed as 0; exactly one of the three arguments must be 0.

    >>> casimir_force(force=1, area=1, distance=1)
    Traceback (most recent call last):
        ...
    ValueError: One and only one argument must be 0
    '''
    if (force, area, distance).count(0 ) != 1:
        raise ValueError("One and only one argument must be 0" )
    if force < 0:
        raise ValueError("Magnitude of force can not be negative" )
    if distance < 0:
        raise ValueError("Distance can not be negative" )
    if area < 0:
        raise ValueError("Area can not be negative" )
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 426 | 0 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m" , "--pretrained_model_name_or_path" , type=str , default=None , required=True , help="Path to pretrained model or model identifier from huggingface.co/models." , )
    parser.add_argument(
        "-c" , "--caption" , type=str , default="robotic cat with wings" , help="Text used to generate images." , )
    parser.add_argument(
        "-n" , "--images_num" , type=int , default=4 , help="How much images to generate." , )
    parser.add_argument(
        "-s" , "--seed" , type=int , default=42 , help="Seed for random process." , )
    parser.add_argument(
        "-ci" , "--cuda_id" , type=int , default=0 , help="cuda_id." , )
    args = parser.parse_args()
    return args
def image_grid( imgs , rows , cols ):
    if not len(imgs ) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct." )
    w , h = imgs[0].size
    grid = Image.new("RGB" , size=(cols * w, rows * h) )
    grid_w , grid_h = grid.size
    for i, img in enumerate(imgs ):
        grid.paste(img , box=(i % cols * w, i // cols * h) )
    return grid
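# Runs the pipeline once for a prompt and tiles the generated images into a grid.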
def __lowerCAmelCase ( __magic_name__ , __magic_name__="robotic cat with wings" , __magic_name__=7.5 , __magic_name__=5_0 , __magic_name__=1 , __magic_name__=4_2 , ):
_lowercase: Tuple = torch.Generator(pipeline.device ).manual_seed(__magic_name__ )
_lowercase: Optional[Any] = pipeline(
__magic_name__ , guidance_scale=__magic_name__ , num_inference_steps=__magic_name__ , generator=__magic_name__ , num_images_per_prompt=__magic_name__ , ).images
_lowercase: str = int(math.sqrt(__magic_name__ ) )
_lowercase: List[str] = image_grid(__magic_name__ , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, 'unet', unet)
else:
    unet = unet.to(torch.device('cuda', args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
| 226 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
_CITATION = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    \'meteor\': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric(\'meteor\')\n    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results["meteor"], 4))\n    0.6944\n'
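# The three strings above feed the metric card (citation, description, usage) rendered by `datasets`.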
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor( datasets.Metric ):
    '''METEOR: unigram-matching MT metric with stemming and synonym support.'''
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence"),
"references": datasets.Value("string" , id="sequence"),
}) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
    def _download_and_prepare( self , dl_manager ):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")
    def _compute( self , predictions , references , alpha=0.9 , beta=3 , gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref) , word_tokenize(pred) , alpha=alpha , beta=beta , gamma=gamma)
                for ref, pred in zip(references , predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref , pred , alpha=alpha , beta=beta , gamma=gamma)
                for ref, pred in zip(references , predictions)
            ]

        return {"meteor": np.mean(scores)}
| 226 | 1 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
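# Builds a tiny VideoMAE config plus random video inputs for the unit tests below.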
class VideoMAEModelTester:
    def __init__( self , parent , batch_size=13 , image_size=10 , num_channels=3 , patch_size=2 , tubelet_size=2 , num_frames=2 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , mask_ratio=0.9 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length )
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )

        config = self.get_config()

        return config, pixel_values, labels

    def get_config( self ):
        return VideoMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = VideoMAEModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_pretraining( self , config , pixel_values , labels ):
        model = VideoMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,) )
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
        bool_masked_pos = mask.expand(self.batch_size , -1 ).bool()

        result = model(pixel_values , bool_masked_pos )
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = VideoMAEModelTester(self )
        self.config_tester = ConfigTester(self , config_class=VideoMAEConfig , has_text_modality=False , hidden_size=37 )

    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = copy.deepcopy(inputs_dict )

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,) )
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
            bool_masked_pos = mask.expand(self.model_tester.batch_size , -1 ).bool()
            inputs_dict['''bool_masked_pos'''] = bool_masked_pos.to(torch_device )

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict['''labels'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )

        return inputs_dict
    def test_config( self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
        pass

    def test_model_common_attributes( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )

    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_pretraining( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_attention_outputs( self ):
        if not self.has_attentions:
            pass
        else:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict['''output_attentions'''] = True
                inputs_dict['''output_hidden_states'''] = False
                config.return_dict = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                attentions = outputs.attentions
                self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                attentions = outputs.attentions
                self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )

                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
                out_len = len(outputs )

                # Check attention is always last and order is fine
                inputs_dict['''output_attentions'''] = True
                inputs_dict['''output_hidden_states'''] = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(out_len + 1 , len(outputs ) )

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states ) , expected_num_layers )

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )

        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict , config , model_class )

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def test_model_outputs_equivalence( self ):
        pass
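
# Integration tests below exercise real checkpoints on a short example video from the Hub.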
def prepare_video():
    video_file = hf_hub_download(
        repo_id='''hf-internal-testing/spaghetti-video''' ,filename='''eating_spaghetti.npy''' ,repo_type='''dataset''')
    video = np.load(video_file)
    return list(video)
@require_torch
@require_vision
class VideoMAEModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
            if is_vision_available()
            else None
        )
@slow
    def test_inference_for_video_classification( self ):
        model = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
            torch_device )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video , return_tensors='''pt''' ).to(torch_device )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )

        # verify the logits
        expected_shape = torch.Size((1, 400) )
        self.assertEqual(outputs.logits.shape , expected_shape )

        expected_slice = torch.tensor([0.36_69, -0.06_88, -0.24_21] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@slow
    def test_inference_for_pretraining( self ):
        model = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(torch_device )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video , return_tensors='''pt''' ).to(torch_device )

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
        inputs['''bool_masked_pos'''] = torch.load(local_path )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )

        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536] )
        expected_slice = torch.tensor(
            [[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] , device=torch_device )
        self.assertEqual(outputs.logits.shape , expected_shape )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice , atol=1E-4 ) )

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.51_42] , device=torch_device )
        self.assertTrue(torch.allclose(outputs.loss , expected_loss , atol=1E-4 ) )

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=False ).to(
            torch_device )

        with torch.no_grad():
            outputs = model(**inputs )

        expected_loss = torch.tensor(torch.tensor([0.64_69] ) , device=torch_device )
        self.assertTrue(torch.allclose(outputs.loss , expected_loss , atol=1E-4 ) )
| 721 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
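

# Tool wrapping a Donut (VisionEncoderDecoder) checkpoint for document visual question answering.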
class DocumentQuestionAnsweringTool( PipelineTool ):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__( self , *args , **kwargs ):
        if not is_vision_available():
            raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' )

        super().__init__(*args , **kwargs )
def __UpperCamelCase ( self : Optional[Any] , _a : "Image" , _a : str ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
_SCREAMING_SNAKE_CASE =task_prompt.replace('''{user_input}''' , _a )
_SCREAMING_SNAKE_CASE =self.pre_processor.tokenizer(
_a , add_special_tokens=_a , return_tensors='''pt''' ).input_ids
_SCREAMING_SNAKE_CASE =self.pre_processor(_a , return_tensors='''pt''' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def __UpperCamelCase ( self : List[Any] , _a : Optional[Any] ) -> int:
"""simple docstring"""
return self.model.generate(
inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=_a , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=_a , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=_a , ).sequences
def __UpperCamelCase ( self : Any , _a : int ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.pre_processor.batch_decode(_a )[0]
_SCREAMING_SNAKE_CASE =sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' )
_SCREAMING_SNAKE_CASE =sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' )
_SCREAMING_SNAKE_CASE =re.sub(R'''<.*?>''' , '''''' , _a , count=1 ).strip() # remove first task start token
_SCREAMING_SNAKE_CASE =self.pre_processor.tokenajson(_a )
return sequence["answer"] | 191 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
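# Import structure for _LazyModule: modeling classes are only registered when torch is available.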
_import_structure = {
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_table_transformer"""] = [
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 82 |
import os
from collections.abc import Iterator
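# Walks the repository and yields every .py/.ipynb file outside hidden and `scripts` directories.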
def _lowerCamelCase( lowercase__ = "." ) -> Iterator[str]:
'''simple docstring'''
for dir_path, dir_names, filenames in os.walk(lowercase__ ):
__lowercase= [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(lowercase__ )[1] in (".py", ".ipynb"):
yield os.path.join(lowercase__ , lowercase__ ).lstrip('./' )
def _lowerCamelCase( lowercase__ ) -> List[str]:
'''simple docstring'''
return F'{i * " "}*' if i else "\n##"
def _lowerCamelCase( lowercase__ , lowercase__ ) -> str:
'''simple docstring'''
__lowercase= old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(lowercase__ ) or old_parts[i] != new_part) and new_part:
print(F'{md_prefix(lowercase__ )} {new_part.replace("_" , " " ).title()}' )
return new_path
def _lowerCamelCase( lowercase__ = "." ) -> None:
'''simple docstring'''
__lowercase= ''
for filepath in sorted(good_file_paths(lowercase__ ) ):
__lowercase, __lowercase= os.path.split(lowercase__ )
if filepath != old_path:
__lowercase= print_path(lowercase__ , lowercase__ )
__lowercase= (filepath.count(os.sep ) + 1) if filepath else 0
__lowercase= F'{filepath}/{filename}'.replace(' ' , '%20' )
__lowercase= os.path.splitext(filename.replace('_' , ' ' ).title() )[0]
print(F'{md_prefix(lowercase__ )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md('''.''')
| 230 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
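# Fast tests build the SAG pipeline from tiny random components; the slow tests below use real checkpoints.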
class StableDiffusionSAGPipelineFastTests( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        # Tiny model components so the fast tests run on CPU in seconds.
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            # MPS does not support device-local generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
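# A minimal usage sketch of the pipeline under test (illustrative only; assumes a
# CUDA device and access to the CompVis weights — prompt and filename are made up):
#
#   pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
#   image = pipe("a photo of an astronaut", sag_scale=0.75, guidance_scale=7.5).images[0]
#   image.save("astronaut.png")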
@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], width=768, height=512, generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np",
        )
        image = output.images

        assert image.shape == (1, 512, 768, 3)
| 701 |
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    """Drop fairseq-only keys that have no counterpart in the Hugging Face model."""
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build a bias-free output projection whose weight is the token embedding matrix."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
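# Weight tying: the returned linear layer reuses the embedding matrix as its weight,
# so output logits are computed against the same vectors used to embed input tokens.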
def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    # fairseq prefixes decoder weights with "decoder."; Hugging Face expects "model.".
    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout, activation_function="gelu",
        scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing_keys = model.load_state_dict(state_dict, strict=False)
    print(missing_keys)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model
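# Example invocation (hypothetical paths; the script name is assumed from the
# usual transformers conversion-script naming convention):
#
#   python convert_xglm_original_ckpt_to_trfms.py /path/to/model.pt ./xglm-converted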
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 307 | 0 |