from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
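
# Illustrative sanity check (not part of the original module): two resistors
# of 3 and 6 ohms combine to 1 / (1/3 + 1/6) = 2 ohms in parallel and
# 3 + 6 = 9 ohms in series.
#
#   resistor_parallel([3.0, 6.0])  # -> 2.0
#   resistor_series([3.0, 6.0])    # -> 9.0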

if __name__ == "__main__":
    import doctest

    doctest.testmod()

import numpy as np

import datasets


_DESCRIPTION = """
Compute the Mahalanobis Distance

Mahalanobis distance is the distance between a point and a distribution,
not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since.
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""

_CITATION = """\
@article{de2000mahalanobis,
  title={The mahalanobis distance},
  author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
  journal={Chemometrics and intelligent laboratory systems},
  volume={50},
  number={1},
  pages={1--18},
  year={2000},
  publisher={Elsevier}
}
"""

_KWARGS_DESCRIPTION = """
Args:
    X: List of datapoints to be compared with the `reference_distribution`.
    reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalanobis distance for each datapoint in `X`.
Examples:

    >>> mahalanobis_metric = datasets.load_metric("mahalanobis")
    >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
    >>> print(results)
    {'mahalanobis': array([0.5])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            # singular covariance: fall back to the Moore-Penrose pseudo-inverse
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
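
# A standalone NumPy sketch of the same computation, handy for sanity-checking
# the metric without going through `datasets` (values mirror the docstring
# example; the singular covariance here falls back to the pseudo-inverse):
#
#   import numpy as np
#   X = np.array([[0, 1]])
#   ref = np.array([[0, 1], [1, 0]])
#   delta = X - np.mean(ref)
#   inv_cov = np.linalg.pinv(np.cov(ref.T))
#   np.dot(np.dot(delta, inv_cov), delta.T).diagonal()  # -> array([0.5])
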
import unittest

from transformers.utils.backbone_utils import (
    BackboneMixin,
    get_aligned_output_features_output_indices,
    verify_out_features_out_indices,
)


class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])

import unittest

import numpy as np

from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.distilbert.modeling_flax_distilbert import (
        FlaxDistilBertForMaskedLM,
        FlaxDistilBertForMultipleChoice,
        FlaxDistilBertForQuestionAnswering,
        FlaxDistilBertForSequenceClassification,
        FlaxDistilBertForTokenClassification,
        FlaxDistilBertModel,
    )


class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileViTConfig,
    MobileViTForImageClassification,
    MobileViTForSemanticSegmentation,
    MobileViTImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
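
# For illustration: get_mobilevit_config("deeplabv3_mobilevit_s") yields a
# 21-label segmentation config with 512x512 inputs, while
# get_mobilevit_config("mobilevit_s") yields a 1000-label classification config.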

def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")
    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name

def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            # split the fused qkv projection into separate query/key/value weights
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])

            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im

@torch.no_grad()
def convert_mobilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(checkpoint, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--mobilevit_name",
        default="mobilevit_s",
        type=str,
        help=(
            "Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
            " 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_mobilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )

import argparse
from typing import Dict

import tensorflow as tf
import torch
from tqdm import tqdm

from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration


INIT_COMMON = [
    # tf -> hf
    ("/", "."),
    ("layer_", "layers."),
    ("kernel", "weight"),
    ("beta", "bias"),
    ("gamma", "weight"),
    ("pegasus", "model"),
]
END_COMMON = [
    (".output.dense", ".fc2"),
    ("intermediate.LayerNorm", "final_layer_norm"),
    ("intermediate.dense", "fc1"),
]

DECODER_PATTERNS = (
    INIT_COMMON
    + [
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.out_proj"),
        ("attention.self", "self_attn"),
        ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
        ("attention.encdec_output.dense", "encoder_attn.out_proj"),
        ("attention.encdec", "encoder_attn"),
        ("key", "k_proj"),
        ("value", "v_proj"),
        ("query", "q_proj"),
        ("decoder.LayerNorm", "decoder.layernorm_embedding"),
    ]
    + END_COMMON
)

REMAINING_PATTERNS = (
    INIT_COMMON
    + [
        ("embeddings.word_embeddings", "shared.weight"),
        ("embeddings.position_embeddings", "embed_positions.weight"),
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.output"),
        ("attention.self", "self_attn.self"),
        ("encoder.LayerNorm", "encoder.layernorm_embedding"),
    ]
    + END_COMMON
)

KEYS_TO_IGNORE = [
    "encdec/key/bias",
    "encdec/query/bias",
    "encdec/value/bias",
    "self/key/bias",
    "self/query/bias",
    "self/value/bias",
    "encdec_output/dense/bias",
    "attention/output/dense/bias",
]


def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
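
# For illustration, rename_state_dict_key applies the pattern list in order,
# so with DECODER_PATTERNS the TF key
#   "pegasus/decoder/layer_0/attention/self/query/kernel"
# is rewritten step by step ("/"->".", "layer_"->"layers.", "kernel"->"weight",
# "pegasus"->"model", "attention.self"->"self_attn", "query"->"q_proj") into
#   "model.decoder.layers.0.self_attn.q_proj.weight"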

def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(i in k for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(i in k for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)

import argparse
import glob
import logging
import os
import time
from argparse import Namespace

import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset

from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors


logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        # We evaluate on the dev set, so map "test" onto the cached dev features
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )

        return parser


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)


if __name__ == "__main__":
    main()

import os
from typing import List, Optional, Union

from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer


class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    # overwrite to save the Q-Former tokenizer in a separate folder
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    # overwrite to load the Q-Former tokenizer from a separate folder
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
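
# A minimal usage sketch (checkpoint name shown for illustration):
#
#   from transformers import InstructBlipProcessor
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   inputs = processor(images=image, text="What is shown here?", return_tensors="pt")
#   # besides the pixel values and text ids, `inputs` carries the Q-Former
#   # inputs under "qformer_input_ids" / "qformer_attention_mask"
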
def naive_cut_rod_recursive(n: int, prices: list):
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )

    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )

        max_rev[n] = max_revenue

    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)

    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])

        max_rev[i] = max_revenue_i

    return max_rev[n]
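
# For example, with the classic CLRS price table [1, 5, 8, 9] the best cut of a
# rod of length 4 is two pieces of length 2:
#   bottom_up_cut_rod(4, [1, 5, 8, 9])  # -> 5 + 5 == 10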

def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()

import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
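
# For illustration: the space byte (32) is not in the printable set, so it is
# remapped to chr(256 + 32) == "Ġ" -- which is why GPT-2-style vocabularies
# mark word boundaries with "Ġ".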

def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
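
# For example, get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")} -- the candidate merges the
# BPE loop below repeatedly ranks and applies.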

class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        # Blenderbot only appends the EOS token; no BOS is prepended
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
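
# A minimal usage sketch (uses the published checkpoint named above):
#
#   tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
#   tokenizer("Hello world")["input_ids"]  # id sequence ending in tokenizer.eos_token_id
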
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_snake_case = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_snake_case = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well, which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists needs to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
_snake_case = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
    word_order (int): Word n-gram order. If equal to `2`, the metric is referred to as chrF++. Defaults to `0`.
    beta (int): Determines the importance of recall w.r.t. precision. Defaults to `2`.
    lowercase (bool): If `True`, enables case-insensitivity. Defaults to `False`.
    whitespace (bool): If `True`, includes whitespace when extracting character n-grams.
    eps_smoothing (bool): If `True`, applies epsilon smoothing similar
    to the reference chrF++.py, NLTK and Moses implementations. If `False`,
    it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
    \'word_order\' (int): The word n-gram order. If equal to 2, the metric is referred to as chrF++,
    \'beta\' (int): Determines the importance of recall w.r.t. precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def _lowerCamelCase ( self: Any ) -> List[str]:
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[
"https://github.com/m-popovic/chrF",
] , )
def _lowerCamelCase ( self: Dict , __lowerCamelCase: Optional[int] , __lowerCamelCase: int , __lowerCamelCase: int = CHRF.CHAR_ORDER , __lowerCamelCase: int = CHRF.WORD_ORDER , __lowerCamelCase: int = CHRF.BETA , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , ) -> List[Any]:
__UpperCAmelCase : List[str] = len(references[0] )
if any(len(__lowerCamelCase ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
__UpperCAmelCase : Dict = [[refs[i] for refs in references] for i in range(__lowerCamelCase )]
__UpperCAmelCase : Union[str, Any] = CHRF(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Tuple = sb_chrf.corpus_score(__lowerCamelCase , __lowerCamelCase )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
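if __name__ == "__main__":
    # Standalone sketch (illustrative) of the reference transposition performed in
    # `_compute` above: one sub-list of references per prediction becomes one list
    # per reference position, which is the layout `CHRF.corpus_score` expects.
    example_references = [["ref A1", "ref A2"], ["ref B1", "ref B2"]]
    references_per_prediction = len(example_references[0])
    transformed = [[refs[i] for refs in example_references] for i in range(references_per_prediction)]
    assert transformed == [["ref A1", "ref B1"], ["ref A2", "ref B2"]]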
| 342 | import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: List[Any] = CanineTokenizer
lowerCamelCase__: Optional[int] = False
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
super().setUp()
__UpperCAmelCase : Tuple = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowerCamelCase ( self: Union[str, Any] ) -> List[Any]:
return CanineTokenizer.from_pretrained("google/canine-s" )
def _lowerCamelCase ( self: Any , **__lowerCamelCase: List[Any] ) -> CanineTokenizer:
__UpperCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
__UpperCAmelCase : Optional[int] = 10_24
return tokenizer
@require_torch
def _lowerCamelCase ( self: List[str] ) -> int:
__UpperCAmelCase : Union[str, Any] = self.canine_tokenizer
__UpperCAmelCase : List[str] = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
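        # CANINE operates directly on Unicode code points: 5_73_44 (0xE000) is the
        # CLS code point, 76 is "L", 1_05 is "i", and so on; 5_73_45 (0xE001) is SEP
        # and the trailing zeros are padding.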
__UpperCAmelCase : Dict = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
# fmt: on
__UpperCAmelCase : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Optional[Any] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
__UpperCAmelCase : Optional[Any] = self.canine_tokenizer
__UpperCAmelCase : Dict = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
__UpperCAmelCase : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , __lowerCamelCase )
self.assertIn("attention_mask" , __lowerCamelCase )
self.assertIn("token_type_ids" , __lowerCamelCase )
@require_torch
def _lowerCamelCase ( self: Any ) -> List[str]:
__UpperCAmelCase : Optional[Any] = self.canine_tokenizer
__UpperCAmelCase : int = [
"What's the weater?",
"It's about 25 degrees.",
]
__UpperCAmelCase : List[Any] = tokenizer(
text_target=__lowerCamelCase , max_length=32 , padding="max_length" , truncation=__lowerCamelCase , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
# safety check on max_len default value so we are sure the test works
__UpperCAmelCase : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__UpperCAmelCase : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCAmelCase : int = tempfile.mkdtemp()
__UpperCAmelCase : List[Any] = " He is very happy, UNwant\u00E9d,running"
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
__UpperCAmelCase : Tuple = tokenizer.__class__.from_pretrained(__lowerCamelCase )
__UpperCAmelCase : Dict = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
shutil.rmtree(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCAmelCase : List[Any] = tempfile.mkdtemp()
__UpperCAmelCase : Optional[int] = " He is very happy, UNwant\u00E9d,running"
__UpperCAmelCase : str = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
__UpperCAmelCase : Tuple = chr(0xE_0_0_7 )
additional_special_tokens.append(__lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
__UpperCAmelCase : Optional[int] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
__UpperCAmelCase : str = tokenizer.__class__.from_pretrained(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertIn(__lowerCamelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__UpperCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(__lowerCamelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__lowerCamelCase )
def _lowerCamelCase ( self: str ) -> Optional[int]:
__UpperCAmelCase : List[Any] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.get_clean_sequence(__lowerCamelCase )
# a special token for Canine can be defined as follows:
__UpperCAmelCase : int = 0xE_0_0_5
__UpperCAmelCase : Tuple = chr(__lowerCamelCase )
tokenizer.add_special_tokens({"cls_token": special_token} )
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , 1 )
__UpperCAmelCase : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : Dict = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : int = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , input_encoded + special_token_id )
__UpperCAmelCase : Optional[int] = tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
self.assertTrue(special_token not in decoded )
def _lowerCamelCase ( self: Optional[int] ) -> Optional[Any]:
__UpperCAmelCase : List[str] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : Optional[int] = chr(0xE_0_0_5 )
__UpperCAmelCase : List[str] = chr(0xE_0_0_6 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__lowerCamelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
__UpperCAmelCase : Tuple = tokenizer.tokenize(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , 1 )
self.assertEqual(len(__lowerCamelCase ) , 1 )
self.assertEqual(token_a[0] , __lowerCamelCase )
self.assertEqual(token_a[0] , __lowerCamelCase )
@require_tokenizers
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
__UpperCAmelCase : Any = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
__UpperCAmelCase : Union[str, Any] = 0xE_0_0_6
__UpperCAmelCase : int = chr(__lowerCamelCase )
__UpperCAmelCase : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__lowerCamelCase )
tokenizer.from_pretrained(__lowerCamelCase )
def _lowerCamelCase ( self: Dict ) -> List[str]:
__UpperCAmelCase : str = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
__UpperCAmelCase : Tuple = json.load(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
__UpperCAmelCase : Optional[int] = json.load(__lowerCamelCase )
# a special token for Canine can be defined as follows:
__UpperCAmelCase : Any = 0xE_0_0_6
__UpperCAmelCase : Union[str, Any] = chr(__lowerCamelCase )
__UpperCAmelCase : Dict = [new_token_a]
__UpperCAmelCase : int = [new_token_a]
with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__UpperCAmelCase : List[str] = tokenizer_class.from_pretrained(__lowerCamelCase , extra_ids=0 )
self.assertIn(__lowerCamelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
__UpperCAmelCase : List[Any] = 0xE_0_0_7
__UpperCAmelCase : List[Any] = chr(__lowerCamelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__UpperCAmelCase : str = [AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase )]
__UpperCAmelCase : Dict = tokenizer_class.from_pretrained(
__lowerCamelCase , additional_special_tokens=__lowerCamelCase , extra_ids=0 )
self.assertIn(__lowerCamelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : Optional[int] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : int = "hello world"
if self.space_between_special_tokens:
__UpperCAmelCase : Any = "[CLS] hello world [SEP]"
else:
__UpperCAmelCase : Union[str, Any] = input
__UpperCAmelCase : List[Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : Any = tokenizer.decode(__lowerCamelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__lowerCamelCase , [output, output.lower()] )
def _lowerCamelCase ( self: Dict ) -> Any:
__UpperCAmelCase : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : List[str] = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
__UpperCAmelCase : List[str] = "a"
__UpperCAmelCase : Any = ord(__lowerCamelCase )
for attr in attributes_list:
setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase )
setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase )
setattr(__lowerCamelCase , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [] )
__UpperCAmelCase : Tuple = 0xE_0_0_6
__UpperCAmelCase : Optional[Any] = chr(__lowerCamelCase )
setattr(__lowerCamelCase , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
pass
def _lowerCamelCase ( self: Any ) -> Any:
pass
def _lowerCamelCase ( self: Union[str, Any] ) -> Tuple:
pass
def _lowerCamelCase ( self: Optional[int] ) -> Any:
pass
def _lowerCamelCase ( self: List[str] ) -> str:
pass
def _lowerCamelCase ( self: Union[str, Any] ) -> Optional[int]:
pass
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
pass
def _lowerCamelCase ( self: str ) -> Tuple:
pass
| 342 | 1 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
_snake_case = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def _UpperCamelCase ( snake_case__ ) -> str:
__UpperCAmelCase : List[str] = torch.load(snake_case__, map_location="cpu" )
return sd
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=rename_keys_prefix ) -> str:
__UpperCAmelCase : Union[str, Any] = OrderedDict()
__UpperCAmelCase : str = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
__UpperCAmelCase : Union[str, Any] = key
for name_pair in rename_keys_prefix:
__UpperCAmelCase : int = new_key.replace(name_pair[0], name_pair[1] )
__UpperCAmelCase : str = d[key]
if key == "bert.cls.predictions.decoder.weight":
            # Old BERT code didn't have `decoder.bias`; it was added to the state dict separately
__UpperCAmelCase : List[Any] = new_d["cls.predictions.bias"]
return new_d
@torch.no_grad()
def _UpperCamelCase ( snake_case__, snake_case__ ) -> List[Any]:
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
__UpperCAmelCase : int = "pretraining"
if "vcr" in checkpoint_path:
__UpperCAmelCase : Tuple = {"visual_embedding_dim": 512}
elif "vqa_advanced" in checkpoint_path:
__UpperCAmelCase : Optional[int] = {"visual_embedding_dim": 2048}
elif "vqa" in checkpoint_path:
__UpperCAmelCase : Dict = {"visual_embedding_dim": 2048}
elif "nlvr" in checkpoint_path:
__UpperCAmelCase : List[Any] = {"visual_embedding_dim": 1024}
else:
raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
__UpperCAmelCase : int = {"visual_embedding_dim": 512}
__UpperCAmelCase : Optional[Any] = "multichoice"
elif "vqa_advanced" in checkpoint_path:
__UpperCAmelCase : int = {"visual_embedding_dim": 2048}
__UpperCAmelCase : Dict = "vqa_advanced"
elif "vqa" in checkpoint_path:
__UpperCAmelCase : int = {"visual_embedding_dim": 2048, "num_labels": 3129}
__UpperCAmelCase : str = "vqa"
elif "nlvr" in checkpoint_path:
__UpperCAmelCase : Any = {
"visual_embedding_dim": 1024,
"num_labels": 2,
}
__UpperCAmelCase : List[str] = "nlvr"
__UpperCAmelCase : List[Any] = VisualBertConfig(**snake_case__ )
# Load State Dict
__UpperCAmelCase : List[Any] = load_state_dict(snake_case__ )
__UpperCAmelCase : str = get_new_dict(snake_case__, snake_case__ )
if model_type == "pretraining":
__UpperCAmelCase : Dict = VisualBertForPreTraining(snake_case__ )
elif model_type == "vqa":
__UpperCAmelCase : List[Any] = VisualBertForQuestionAnswering(snake_case__ )
elif model_type == "nlvr":
__UpperCAmelCase : List[Any] = VisualBertForVisualReasoning(snake_case__ )
elif model_type == "multichoice":
__UpperCAmelCase : Any = VisualBertForMultipleChoice(snake_case__ )
model.load_state_dict(snake_case__ )
# Save Checkpoints
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
model.save_pretrained(snake_case__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
_snake_case = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
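    # Minimal illustration (added; hypothetical key) of the prefix renaming the
    # conversion applies: each (old, new) pair in the rename table is applied in
    # sequence to every state-dict key.
    demo_key = "bert.bert.encoder.layer.0.attention.self.query.weight"
    for old, new in [("bert.bert", "visual_bert"), ("bert.cls", "cls")]:
        demo_key = demo_key.replace(old, new)
    assert demo_key == "visual_bert.encoder.layer.0.attention.self.query.weight"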
| 342 | import logging
import os
from .state import PartialState
class _snake_case ( logging.LoggerAdapter ):
@staticmethod
def _lowerCamelCase ( __lowerCamelCase: Any ) -> int:
__UpperCAmelCase : str = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[Any] , *__lowerCamelCase: List[str] , **__lowerCamelCase: List[Any] ) -> Optional[int]:
if PartialState._shared_state == {}:
raise RuntimeError(
"You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." )
__UpperCAmelCase : Any = kwargs.pop("main_process_only" , __lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = kwargs.pop("in_order" , __lowerCamelCase )
if self.isEnabledFor(__lowerCamelCase ):
if self._should_log(__lowerCamelCase ):
__UpperCAmelCase , __UpperCAmelCase : int = self.process(__lowerCamelCase , __lowerCamelCase )
self.logger.log(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
elif in_order:
__UpperCAmelCase : Optional[int] = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self.process(__lowerCamelCase , __lowerCamelCase )
self.logger.log(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
state.wait_for_everyone()
def _UpperCamelCase ( snake_case__, snake_case__ = None ) -> List[str]:
if log_level is None:
__UpperCAmelCase : List[Any] = os.environ.get("ACCELERATE_LOG_LEVEL", snake_case__ )
__UpperCAmelCase : Union[str, Any] = logging.getLogger(snake_case__ )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(snake_case__, {} )
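if __name__ == "__main__":
    # Usage sketch (assumes the public `accelerate` API that this file is an
    # obfuscated copy of; the factory above corresponds to `get_logger`).
    # `main_process_only=True` emits a record once, on the main process, while
    # `in_order=True` lets every process log, one rank at a time.
    from accelerate import PartialState
    from accelerate.logging import get_logger

    PartialState()  # the shared state must be initialized before logging
    demo_logger = get_logger(__name__, log_level="INFO")
    demo_logger.info("hello from the main process", main_process_only=True)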
| 342 | 1 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_snake_case = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
_snake_case = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
_snake_case = r'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def _lowerCamelCase ( self: Optional[int] ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase : Union[str, Any] = 0.0
for i, j in zip(__lowerCamelCase , __lowerCamelCase ):
n_correct += 1.0 if math_equivalence.is_equiv(__lowerCamelCase , __lowerCamelCase ) else 0.0
__UpperCAmelCase : List[str] = n_correct / len(__lowerCamelCase )
return {
"accuracy": accuracy,
}
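if __name__ == "__main__":
    # Standalone sketch (illustrative): `is_equiv` canonicalizes both strings
    # before comparing, so "1/2" and "\\frac{1}{2}" count as a match.
    print(math_equivalence.is_equiv("\\frac{1}{2}", "1/2"))  # expected: True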
| 342 | from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _snake_case ( _lowercase ):
def __init__( self: Optional[Any] , __lowerCamelCase: NestedDataStructureLike[PathLike] , __lowerCamelCase: Optional[NamedSplit] = None , __lowerCamelCase: Optional[Features] = None , __lowerCamelCase: str = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: Optional[int] = None , **__lowerCamelCase: Tuple , ) -> str:
super().__init__(
__lowerCamelCase , split=__lowerCamelCase , features=__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase , streaming=__lowerCamelCase , num_proc=__lowerCamelCase , **__lowerCamelCase , )
__UpperCAmelCase : Union[str, Any] = path_or_paths if isinstance(__lowerCamelCase , __lowerCamelCase ) else {self.split: path_or_paths}
__UpperCAmelCase : int = Text(
cache_dir=__lowerCamelCase , data_files=__lowerCamelCase , features=__lowerCamelCase , **__lowerCamelCase , )
def _lowerCamelCase ( self: List[Any] ) -> Optional[Any]:
# Build iterable dataset
if self.streaming:
__UpperCAmelCase : List[str] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__UpperCAmelCase : Any = None
__UpperCAmelCase : Any = None
__UpperCAmelCase : Dict = None
__UpperCAmelCase : str = None
self.builder.download_and_prepare(
download_config=__lowerCamelCase , download_mode=__lowerCamelCase , verification_mode=__lowerCamelCase , base_path=__lowerCamelCase , num_proc=self.num_proc , )
__UpperCAmelCase : Dict = self.builder.as_dataset(
split=self.split , verification_mode=__lowerCamelCase , in_memory=self.keep_in_memory )
return dataset
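if __name__ == "__main__":
    # Usage sketch (hypothetical file path; assumes the public `datasets` API,
    # which routes "text" loading through the reader above). Each line of the
    # input file becomes one {"text": ...} example.
    from datasets import load_dataset

    demo_ds = load_dataset("text", data_files={"train": "corpus.txt"}, split="train")
    print(demo_ds[0])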
| 342 | 1 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_snake_case = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__=None ) -> Optional[Any]:
# Initialise PyTorch model
__UpperCAmelCase : Any = XLNetConfig.from_json_file(snake_case__ )
__UpperCAmelCase : Any = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' )
__UpperCAmelCase : Dict = finetuning_task
__UpperCAmelCase : Dict = GLUE_TASKS_NUM_LABELS[finetuning_task]
__UpperCAmelCase : Optional[Any] = XLNetForSequenceClassification(snake_case__ )
elif "squad" in finetuning_task:
__UpperCAmelCase : Union[str, Any] = finetuning_task
__UpperCAmelCase : List[str] = XLNetForQuestionAnswering(snake_case__ )
else:
__UpperCAmelCase : Union[str, Any] = XLNetLMHeadModel(snake_case__ )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(snake_case__, snake_case__, snake_case__ )
# Save pytorch-model
__UpperCAmelCase : Optional[Any] = os.path.join(snake_case__, snake_case__ )
__UpperCAmelCase : List[Any] = os.path.join(snake_case__, snake_case__ )
print(f'''Save PyTorch model to {os.path.abspath(snake_case__ )}''' )
torch.save(model.state_dict(), snake_case__ )
print(f'''Save configuration file to {os.path.abspath(snake_case__ )}''' )
with open(snake_case__, "w", encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
_snake_case = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
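    # Programmatic sketch (hypothetical local paths; illustrative only). The
    # positional order matches the call above: tf_checkpoint_path,
    # xlnet_config_file, pytorch_dump_folder_path, finetuning_task.
    #
    #     convert_xlnet_checkpoint_to_pytorch(
    #         "./xlnet_model.ckpt", "./xlnet_config.json", "./xlnet-pytorch", "sts-b"
    #     )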
| 342 | from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
'''configuration_trajectory_transformer''': [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TrajectoryTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrajectoryTransformerModel''',
'''TrajectoryTransformerPreTrainedModel''',
'''load_tf_weights_in_trajectory_transformer''',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 342 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
'''configuration_convnext''': ['''CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvNextConfig''', '''ConvNextOnnxConfig''']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''ConvNextFeatureExtractor''']
_snake_case = ['''ConvNextImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvNextForImageClassification''',
'''ConvNextModel''',
'''ConvNextPreTrainedModel''',
'''ConvNextBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''TFConvNextForImageClassification''',
'''TFConvNextModel''',
'''TFConvNextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 342 | import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
def __init__( self: Tuple , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any]=13 , __lowerCamelCase: Optional[int]=32 , __lowerCamelCase: List[str]=3 , __lowerCamelCase: Dict=4 , __lowerCamelCase: Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase: int=[2, 2, 3, 2] , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Tuple=37 , __lowerCamelCase: Tuple="gelu" , __lowerCamelCase: List[Any]=10 , __lowerCamelCase: Optional[int]=0.02 , __lowerCamelCase: Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase: Optional[int]=[2, 3, 4] , __lowerCamelCase: int=None , ) -> List[str]:
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : Optional[int] = image_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : Union[str, Any] = num_stages
__UpperCAmelCase : List[str] = hidden_sizes
__UpperCAmelCase : Any = depths
__UpperCAmelCase : Optional[int] = is_training
__UpperCAmelCase : List[Any] = use_labels
__UpperCAmelCase : Optional[int] = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Union[str, Any] = num_labels
__UpperCAmelCase : Any = initializer_range
__UpperCAmelCase : List[str] = out_features
__UpperCAmelCase : Tuple = out_indices
__UpperCAmelCase : List[Any] = scope
def _lowerCamelCase ( self: List[Any] ) -> Optional[int]:
__UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : List[str] = None
if self.use_labels:
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self: Tuple ) -> List[Any]:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase : Optional[Any] = ConvNextVaModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : List[str] = model(__lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: Tuple ) -> Tuple:
__UpperCAmelCase : Union[str, Any] = ConvNextVaForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Optional[int] = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self: int , __lowerCamelCase: Any , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Any = model(__lowerCamelCase )
        # verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__UpperCAmelCase : List[Any] = None
__UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Any = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowerCamelCase ( self: int ) -> List[str]:
__UpperCAmelCase : int = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs
__UpperCAmelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs
__UpperCAmelCase : Dict = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class _snake_case ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase__: Dict = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__: str = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__: Tuple = False
lowerCamelCase__: int = False
lowerCamelCase__: Dict = False
lowerCamelCase__: int = False
lowerCamelCase__: Any = False
def _lowerCamelCase ( self: Union[str, Any] ) -> Union[str, Any]:
__UpperCAmelCase : Union[str, Any] = ConvNextVaModelTester(self )
__UpperCAmelCase : str = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def _lowerCamelCase ( self: Dict ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self: List[Any] ) -> int:
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def _lowerCamelCase ( self: Any ) -> Any:
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def _lowerCamelCase ( self: str ) -> Optional[Any]:
pass
def _lowerCamelCase ( self: List[Any] ) -> int:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_with_labels()
__UpperCAmelCase : Optional[Any] = True
if model_class.__name__ in [
*get_values(__lowerCamelCase ),
*get_values(__lowerCamelCase ),
]:
continue
__UpperCAmelCase : Optional[Any] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
__UpperCAmelCase : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
__UpperCAmelCase : Any = model(**__lowerCamelCase ).loss
loss.backward()
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = True
if (
model_class.__name__
in [*get_values(__lowerCamelCase ), *get_values(__lowerCamelCase )]
or not model_class.supports_gradient_checkpointing
):
continue
__UpperCAmelCase : int = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.gradient_checkpointing_enable()
model.train()
__UpperCAmelCase : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
__UpperCAmelCase : Any = model(**__lowerCamelCase ).loss
loss.backward()
def _lowerCamelCase ( self: List[str] ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : str = model_class(__lowerCamelCase )
__UpperCAmelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : List[Any] = [*signature.parameters.keys()]
__UpperCAmelCase : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _lowerCamelCase ( self: str ) -> List[Any]:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowerCamelCase ( self: Union[str, Any] ) -> Dict:
def check_hidden_states_output(__lowerCamelCase: Any , __lowerCamelCase: Tuple , __lowerCamelCase: str ):
__UpperCAmelCase : Any = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Tuple = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
__UpperCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[int] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Any = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def _lowerCamelCase ( self: Dict ) -> List[Any]:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Optional[int] = ConvNextVaModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def _UpperCamelCase ( ) -> List[Any]:
__UpperCAmelCase : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
__UpperCAmelCase : List[Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCamelCase )
__UpperCAmelCase : List[str] = self.default_image_processor
__UpperCAmelCase : Optional[Any] = prepare_img()
__UpperCAmelCase : int = preprocessor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
__UpperCAmelCase : str = model(**__lowerCamelCase )
# verify the logits
__UpperCAmelCase : Dict = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
__UpperCAmelCase : str = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
| 342 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
    '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
    '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
    '''xlm-roberta-large-finetuned-conll02-dutch''': (
        '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
    ),
    '''xlm-roberta-large-finetuned-conll02-spanish''': (
        '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
    ),
    '''xlm-roberta-large-finetuned-conll03-english''': (
        '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
    ),
    '''xlm-roberta-large-finetuned-conll03-german''': (
        '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
    ),
}
class _snake_case ( _lowercase ):
lowerCamelCase__: Optional[Any] = "xlm-roberta"
def __init__( self: Union[str, Any] , __lowerCamelCase: Dict=3_05_22 , __lowerCamelCase: Optional[Any]=7_68 , __lowerCamelCase: Any=12 , __lowerCamelCase: List[str]=12 , __lowerCamelCase: Tuple=30_72 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: List[str]=0.1 , __lowerCamelCase: Tuple=0.1 , __lowerCamelCase: str=5_12 , __lowerCamelCase: List[Any]=2 , __lowerCamelCase: str=0.02 , __lowerCamelCase: Union[str, Any]=1e-12 , __lowerCamelCase: Tuple=1 , __lowerCamelCase: Union[str, Any]=0 , __lowerCamelCase: Optional[Any]=2 , __lowerCamelCase: List[Any]="absolute" , __lowerCamelCase: List[Any]=True , __lowerCamelCase: int=None , **__lowerCamelCase: Optional[Any] , ) -> Optional[Any]:
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = vocab_size
__UpperCAmelCase : List[str] = hidden_size
__UpperCAmelCase : Union[str, Any] = num_hidden_layers
__UpperCAmelCase : str = num_attention_heads
__UpperCAmelCase : str = hidden_act
__UpperCAmelCase : str = intermediate_size
__UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
__UpperCAmelCase : Tuple = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = max_position_embeddings
__UpperCAmelCase : int = type_vocab_size
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : int = layer_norm_eps
__UpperCAmelCase : Union[str, Any] = position_embedding_type
__UpperCAmelCase : Union[str, Any] = use_cache
__UpperCAmelCase : Union[str, Any] = classifier_dropout
class _snake_case ( _lowercase ):
@property
def _lowerCamelCase ( self: Optional[int] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__UpperCAmelCase : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
__UpperCAmelCase : Dict = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
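if __name__ == "__main__":
    # Inspection sketch (assumes the public `transformers` names this file is an
    # obfuscated copy of: XLMRobertaConfig / XLMRobertaOnnxConfig; illustrative only).
    from transformers import XLMRobertaConfig
    from transformers.models.xlm_roberta.configuration_xlm_roberta import XLMRobertaOnnxConfig

    demo_onnx = XLMRobertaOnnxConfig(XLMRobertaConfig(), task="multiple-choice")
    print(demo_onnx.inputs)  # input_ids / attention_mask with batch, choice, sequence axes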
| 342 | import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
_snake_case = {
    '''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class _snake_case ( _lowercase ):
lowerCamelCase__: str = "detr"
lowerCamelCase__: Dict = ["past_key_values"]
lowerCamelCase__: str = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self: List[str] , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Any=None , __lowerCamelCase: Dict=3 , __lowerCamelCase: str=1_00 , __lowerCamelCase: Union[str, Any]=6 , __lowerCamelCase: Union[str, Any]=20_48 , __lowerCamelCase: Dict=8 , __lowerCamelCase: Optional[int]=6 , __lowerCamelCase: List[Any]=20_48 , __lowerCamelCase: int=8 , __lowerCamelCase: Tuple=0.0 , __lowerCamelCase: Dict=0.0 , __lowerCamelCase: Any=True , __lowerCamelCase: Tuple="relu" , __lowerCamelCase: Tuple=2_56 , __lowerCamelCase: Dict=0.1 , __lowerCamelCase: Union[str, Any]=0.0 , __lowerCamelCase: Optional[int]=0.0 , __lowerCamelCase: Union[str, Any]=0.02 , __lowerCamelCase: str=1.0 , __lowerCamelCase: List[str]=False , __lowerCamelCase: Dict="sine" , __lowerCamelCase: Optional[int]="resnet50" , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: int=False , __lowerCamelCase: Union[str, Any]=1 , __lowerCamelCase: Tuple=5 , __lowerCamelCase: int=2 , __lowerCamelCase: Dict=1 , __lowerCamelCase: Dict=1 , __lowerCamelCase: Union[str, Any]=5 , __lowerCamelCase: Dict=2 , __lowerCamelCase: int=0.1 , **__lowerCamelCase: str , ) -> int:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__UpperCAmelCase : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : List[Any] = backbone_config.get("model_type" )
__UpperCAmelCase : List[str] = CONFIG_MAPPING[backbone_model_type]
__UpperCAmelCase : List[str] = config_class.from_dict(__lowerCamelCase )
# set timm attributes to None
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = None, None, None
__UpperCAmelCase : Any = use_timm_backbone
__UpperCAmelCase : Optional[Any] = backbone_config
__UpperCAmelCase : Optional[Any] = num_channels
__UpperCAmelCase : List[Any] = num_queries
__UpperCAmelCase : Optional[int] = d_model
__UpperCAmelCase : Optional[Any] = encoder_ffn_dim
__UpperCAmelCase : Dict = encoder_layers
__UpperCAmelCase : List[Any] = encoder_attention_heads
__UpperCAmelCase : int = decoder_ffn_dim
__UpperCAmelCase : Tuple = decoder_layers
__UpperCAmelCase : int = decoder_attention_heads
__UpperCAmelCase : List[Any] = dropout
__UpperCAmelCase : Dict = attention_dropout
__UpperCAmelCase : Optional[Any] = activation_dropout
__UpperCAmelCase : int = activation_function
__UpperCAmelCase : Any = init_std
__UpperCAmelCase : str = init_xavier_std
__UpperCAmelCase : int = encoder_layerdrop
__UpperCAmelCase : Tuple = decoder_layerdrop
__UpperCAmelCase : List[Any] = encoder_layers
__UpperCAmelCase : Optional[Any] = auxiliary_loss
__UpperCAmelCase : int = position_embedding_type
__UpperCAmelCase : Optional[int] = backbone
__UpperCAmelCase : str = use_pretrained_backbone
__UpperCAmelCase : Dict = dilation
# Hungarian matcher
__UpperCAmelCase : Optional[int] = class_cost
__UpperCAmelCase : Optional[Any] = bbox_cost
__UpperCAmelCase : Optional[int] = giou_cost
# Loss coefficients
__UpperCAmelCase : Any = mask_loss_coefficient
__UpperCAmelCase : Any = dice_loss_coefficient
__UpperCAmelCase : Any = bbox_loss_coefficient
__UpperCAmelCase : Optional[int] = giou_loss_coefficient
__UpperCAmelCase : Optional[Any] = eos_coefficient
super().__init__(is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase )
@property
def _lowerCamelCase ( self: Dict ) -> int:
return self.encoder_attention_heads
@property
def _lowerCamelCase ( self: str ) -> int:
return self.d_model
@classmethod
def _lowerCamelCase ( cls: Optional[int] , __lowerCamelCase: PretrainedConfig , **__lowerCamelCase: List[Any] ) -> List[Any]:
return cls(backbone_config=__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( self: str ) -> Dict[str, any]:
__UpperCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
__UpperCAmelCase : int = self.backbone_config.to_dict()
__UpperCAmelCase : List[str] = self.__class__.model_type
return output
class _snake_case ( _lowercase ):
lowerCamelCase__: Optional[int] = version.parse("1.11" )
@property
def _lowerCamelCase ( self: Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _lowerCamelCase ( self: Optional[Any] ) -> float:
return 1e-5
@property
def _lowerCamelCase ( self: List[str] ) -> int:
return 12
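# [Editor's sketch] How a PretrainedConfig-style `attribute_map` (as in the DETR
# config above) aliases one attribute name onto another. `MiniConfig` is a
# made-up stand-in for illustration, not the transformers implementation.
class MiniConfig:
    attribute_map = {"hidden_size": "d_model"}

    def __init__(self, d_model=256):
        self.d_model = d_model

    def __getattr__(self, name):
        # only reached when normal attribute lookup fails
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)


cfg = MiniConfig(d_model=512)
assert cfg.hidden_size == cfg.d_model == 512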
| 342 | 1 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class _snake_case :
lowerCamelCase__: int
lowerCamelCase__: int
class _snake_case :
def __init__( self: List[Any] , __lowerCamelCase: int ) -> Dict:
__UpperCAmelCase : list[list[Edge]] = [[] for _ in range(__lowerCamelCase )]
__UpperCAmelCase : str = size
def __getitem__( self: Union[str, Any] , __lowerCamelCase: int ) -> Iterator[Edge]:
return iter(self._graph[vertex] )
@property
def _lowerCamelCase ( self: int ) -> str:
return self._size
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: int ) -> Any:
if weight not in (0, 1):
raise ValueError("Edge weight must be either 0 or 1." )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError("Vertex indexes must be in [0; size)." )
self._graph[from_vertex].append(Edge(__lowerCamelCase , __lowerCamelCase ) )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: int , __lowerCamelCase: int ) -> int | None:
__UpperCAmelCase : int = deque([start_vertex] )
__UpperCAmelCase : list[int | None] = [None] * self.size
__UpperCAmelCase : List[str] = 0
while queue:
__UpperCAmelCase : str = queue.popleft()
__UpperCAmelCase : List[str] = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
__UpperCAmelCase : str = current_distance + edge.weight
__UpperCAmelCase : Optional[int] = distances[edge.destination_vertex]
if (
isinstance(__lowerCamelCase , __lowerCamelCase )
and new_distance >= dest_vertex_distance
):
continue
__UpperCAmelCase : Any = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError("No path from start_vertex to finish_vertex." )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
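# [Editor's sketch] A standalone 0-1 BFS equivalent to the class above (the
# mangled names cannot be called directly). Zero-weight edges go to the front of
# the deque, weight-one edges to the back, giving shortest paths in O(V + E).
from collections import deque


def zero_one_bfs(adj, start, finish):
    dist = [None] * len(adj)
    dist[start] = 0
    queue = deque([start])
    while queue:
        u = queue.popleft()
        for v, w in adj[u]:
            nd = dist[u] + w
            if dist[v] is None or nd < dist[v]:
                dist[v] = nd
                if w == 0:
                    queue.appendleft(v)
                else:
                    queue.append(v)
    return dist[finish]


# two zero-weight hops (0 -> 1 -> 2) beat the direct weight-1 edge 0 -> 2
assert zero_one_bfs([[(1, 0), (2, 1)], [(2, 0)], []], 0, 2) == 0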
| 342 | from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=1e-1_2 ) -> str:
__UpperCAmelCase : Any = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(snake_case__, axis=1 ), a_min=snake_case__ ) ).T
__UpperCAmelCase : int = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(snake_case__, axis=1 ), a_min=snake_case__ ) ).T
return jnp.matmul(snake_case__, norm_emb_a.T )
class _snake_case ( nn.Module ):
lowerCamelCase__: CLIPConfig
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: Any ) -> Tuple:
__UpperCAmelCase : List[str] = FlaxCLIPVisionModule(self.config.vision_config )
__UpperCAmelCase : Any = nn.Dense(self.config.projection_dim , use_bias=__lowerCamelCase , dtype=self.dtype )
__UpperCAmelCase : int = self.param("concept_embeds" , jax.nn.initializers.ones , (17, self.config.projection_dim) )
__UpperCAmelCase : int = self.param(
"special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim) )
__UpperCAmelCase : Tuple = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (17,) )
__UpperCAmelCase : str = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,) )
def __call__( self: List[Any] , __lowerCamelCase: Dict ) -> Dict:
__UpperCAmelCase : Optional[int] = self.vision_model(__lowerCamelCase )[1]
__UpperCAmelCase : List[str] = self.visual_projection(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = jax_cosine_distance(__lowerCamelCase , self.special_care_embeds )
__UpperCAmelCase : Optional[Any] = jax_cosine_distance(__lowerCamelCase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
__UpperCAmelCase : List[str] = 0.0
__UpperCAmelCase : Tuple = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
__UpperCAmelCase : List[str] = jnp.round(__lowerCamelCase , 3 )
__UpperCAmelCase : Any = jnp.any(special_scores > 0 , axis=1 , keepdims=__lowerCamelCase )
# Use a lower threshold if an image has any special care concept
__UpperCAmelCase : List[Any] = is_special_care * 0.01
__UpperCAmelCase : Any = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
__UpperCAmelCase : List[str] = jnp.round(__lowerCamelCase , 3 )
__UpperCAmelCase : Any = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class _snake_case ( _lowercase ):
lowerCamelCase__: int = CLIPConfig
lowerCamelCase__: Tuple = "clip_input"
lowerCamelCase__: str = FlaxStableDiffusionSafetyCheckerModule
def __init__( self: Union[str, Any] , __lowerCamelCase: CLIPConfig , __lowerCamelCase: Optional[Tuple] = None , __lowerCamelCase: int = 0 , __lowerCamelCase: jnp.dtype = jnp.floataa , __lowerCamelCase: bool = True , **__lowerCamelCase: Optional[int] , ) -> int:
if input_shape is None:
__UpperCAmelCase : Dict = (1, 2_24, 2_24, 3)
__UpperCAmelCase : Tuple = self.module_class(config=__lowerCamelCase , dtype=__lowerCamelCase , **__lowerCamelCase )
super().__init__(__lowerCamelCase , __lowerCamelCase , input_shape=__lowerCamelCase , seed=__lowerCamelCase , dtype=__lowerCamelCase , _do_init=_do_init )
def _lowerCamelCase ( self: Dict , __lowerCamelCase: jax.random.KeyArray , __lowerCamelCase: Tuple , __lowerCamelCase: FrozenDict = None ) -> FrozenDict:
# init input tensor
__UpperCAmelCase : Tuple = jax.random.normal(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase , __UpperCAmelCase : Dict = jax.random.split(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = {"params": params_rng, "dropout": dropout_rng}
__UpperCAmelCase : str = self.module.init(__lowerCamelCase , __lowerCamelCase )["params"]
return random_params
def __call__( self: Union[str, Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: dict = None , ) -> List[Any]:
__UpperCAmelCase : int = jnp.transpose(__lowerCamelCase , (0, 2, 3, 1) )
return self.module.apply(
{"params": params or self.params} , jnp.array(__lowerCamelCase , dtype=jnp.floataa ) , rngs={} , )
| 342 | 1 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _UpperCamelCase ( snake_case__ ) -> int:
__UpperCAmelCase : Dict = prime_factors(snake_case__ )
if is_square_free(snake_case__ ):
return -1 if len(snake_case__ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
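# [Editor's sketch] A self-contained Möbius function with the same contract as
# the snippet above: 0 when n has a squared prime factor, otherwise (-1)**k for
# k distinct prime factors.
def mobius(n: int) -> int:
    k, d = 0, 2
    while d * d <= n:
        if n % d == 0:
            n //= d
            if n % d == 0:  # squared factor -> not square-free
                return 0
            k += 1
        else:
            d += 1
    if n > 1:
        k += 1
    return -1 if k % 2 else 1


assert [mobius(n) for n in (1, 2, 3, 4, 6, 12)] == [1, -1, -1, 0, 1, 0]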
| 342 | import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def _UpperCamelCase ( snake_case__ ) -> Tuple:
__UpperCAmelCase : Union[str, Any] = 384
if "tiny" in model_name:
__UpperCAmelCase : Union[str, Any] = [3, 3, 9, 3]
__UpperCAmelCase : List[Any] = [96, 192, 384, 768]
if "small" in model_name:
__UpperCAmelCase : Tuple = [3, 3, 27, 3]
__UpperCAmelCase : Any = [96, 192, 384, 768]
if "base" in model_name:
__UpperCAmelCase : str = [3, 3, 27, 3]
__UpperCAmelCase : str = [128, 256, 512, 1024]
__UpperCAmelCase : str = 512
if "large" in model_name:
__UpperCAmelCase : Dict = [3, 3, 27, 3]
__UpperCAmelCase : int = [192, 384, 768, 1536]
__UpperCAmelCase : Dict = 768
if "xlarge" in model_name:
__UpperCAmelCase : List[Any] = [3, 3, 27, 3]
__UpperCAmelCase : Tuple = [256, 512, 1024, 2048]
__UpperCAmelCase : int = 1024
# set label information
__UpperCAmelCase : List[Any] = 150
__UpperCAmelCase : str = "huggingface/label-files"
__UpperCAmelCase : List[Any] = "ade20k-id2label.json"
__UpperCAmelCase : str = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) )
__UpperCAmelCase : str = {int(k ): v for k, v in idalabel.items()}
__UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()}
__UpperCAmelCase : int = ConvNextConfig(
depths=snake_case__, hidden_sizes=snake_case__, out_features=["stage1", "stage2", "stage3", "stage4"] )
__UpperCAmelCase : int = UperNetConfig(
backbone_config=snake_case__, auxiliary_in_channels=snake_case__, num_labels=snake_case__, idalabel=snake_case__, labelaid=snake_case__, )
return config
def _UpperCamelCase ( snake_case__ ) -> Tuple:
__UpperCAmelCase : Optional[int] = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Any:
__UpperCAmelCase : Union[str, Any] = dct.pop(snake_case__ )
__UpperCAmelCase : Optional[int] = val
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Union[str, Any]:
__UpperCAmelCase : Dict = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
__UpperCAmelCase : Union[str, Any] = model_name_to_url[model_name]
__UpperCAmelCase : str = torch.hub.load_state_dict_from_url(snake_case__, map_location="cpu" )["state_dict"]
__UpperCAmelCase : Dict = get_upernet_config(snake_case__ )
__UpperCAmelCase : str = UperNetForSemanticSegmentation(snake_case__ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
__UpperCAmelCase : str = state_dict.pop(snake_case__ )
if "bn" in key:
__UpperCAmelCase : int = key.replace("bn", "batch_norm" )
__UpperCAmelCase : Union[str, Any] = val
# rename keys
__UpperCAmelCase : Optional[Any] = create_rename_keys(snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__, snake_case__, snake_case__ )
model.load_state_dict(snake_case__ )
# verify on image
__UpperCAmelCase : int = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
__UpperCAmelCase : Optional[int] = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw ).convert("RGB" )
__UpperCAmelCase : str = SegformerImageProcessor()
__UpperCAmelCase : Any = processor(snake_case__, return_tensors="pt" ).pixel_values
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(snake_case__ )
if model_name == "upernet-convnext-tiny":
__UpperCAmelCase : Any = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
__UpperCAmelCase : Optional[Any] = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
__UpperCAmelCase : Dict = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
__UpperCAmelCase : Tuple = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
__UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print("Logits:", outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3], snake_case__, atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case__ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(snake_case__ )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[F'upernet-convnext-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_snake_case = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
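# [Editor's sketch] The core move of the conversion script above is "pop the old
# key, insert the new one" over a checkpoint's state dict; a minimal dict-based
# illustration (keys shortened for the example).
def rename_key_demo(state_dict, old, new):
    state_dict[new] = state_dict.pop(old)


sd = {"backbone.norm0.weight": 1, "decode_head.conv_seg.bias": 2}
for src, dest in [
    ("backbone.norm0.weight", "backbone.hidden_states_norms.stage1.weight"),
    ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
]:
    rename_key_demo(sd, src, dest)
assert "backbone.norm0.weight" not in sd and sd["decode_head.classifier.bias"] == 2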
| 342 | 1 |
from __future__ import annotations
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> tuple[float, list[float]]:
__UpperCAmelCase : List[Any] = list(range(len(snake_case__ ) ) )
__UpperCAmelCase : Optional[int] = [v / w for v, w in zip(snake_case__, snake_case__ )]
index.sort(key=lambda i : ratio[i], reverse=snake_case__ )
__UpperCAmelCase : float = 0
__UpperCAmelCase : list[float] = [0] * len(snake_case__ )
for i in index:
if weight[i] <= capacity:
__UpperCAmelCase : List[Any] = 1
max_value += value[i]
capacity -= weight[i]
else:
__UpperCAmelCase : List[Any] = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
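# [Editor's sketch] The textbook fractional-knapsack instance solved by the same
# greedy value/weight ordering as above (standalone, since the names are mangled).
def fractional_knapsack(value, weight, capacity):
    order = sorted(range(len(value)), key=lambda i: value[i] / weight[i], reverse=True)
    total = 0.0
    for i in order:
        take = min(weight[i], capacity)
        total += value[i] * take / weight[i]
        capacity -= take
        if capacity == 0:
            break
    return total


assert fractional_knapsack([60, 100, 120], [10, 20, 30], 50) == 240.0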
| 342 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class _snake_case ( _lowercase ):
lowerCamelCase__: Dict = "roc_bert"
def __init__( self: int , __lowerCamelCase: Union[str, Any]=3_05_22 , __lowerCamelCase: int=7_68 , __lowerCamelCase: Any=12 , __lowerCamelCase: int=12 , __lowerCamelCase: Union[str, Any]=30_72 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: Optional[int]=0.1 , __lowerCamelCase: str=0.1 , __lowerCamelCase: Any=5_12 , __lowerCamelCase: Union[str, Any]=2 , __lowerCamelCase: str=0.02 , __lowerCamelCase: int=1e-12 , __lowerCamelCase: str=True , __lowerCamelCase: int=0 , __lowerCamelCase: List[str]="absolute" , __lowerCamelCase: List[Any]=None , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: Dict=7_68 , __lowerCamelCase: Optional[int]=9_10 , __lowerCamelCase: Union[str, Any]=5_12 , __lowerCamelCase: int=2_48_58 , __lowerCamelCase: Optional[int]=True , **__lowerCamelCase: Any , ) -> List[Any]:
__UpperCAmelCase : str = vocab_size
__UpperCAmelCase : Dict = max_position_embeddings
__UpperCAmelCase : Optional[Any] = hidden_size
__UpperCAmelCase : Optional[int] = num_hidden_layers
__UpperCAmelCase : Union[str, Any] = num_attention_heads
__UpperCAmelCase : List[str] = intermediate_size
__UpperCAmelCase : List[Any] = hidden_act
__UpperCAmelCase : List[str] = hidden_dropout_prob
__UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : Optional[Any] = type_vocab_size
__UpperCAmelCase : List[Any] = layer_norm_eps
__UpperCAmelCase : Optional[int] = use_cache
__UpperCAmelCase : Optional[Any] = enable_pronunciation
__UpperCAmelCase : Any = enable_shape
__UpperCAmelCase : Union[str, Any] = pronunciation_embed_dim
__UpperCAmelCase : Optional[Any] = pronunciation_vocab_size
__UpperCAmelCase : Optional[Any] = shape_embed_dim
__UpperCAmelCase : List[Any] = shape_vocab_size
__UpperCAmelCase : int = concat_input
__UpperCAmelCase : int = position_embedding_type
__UpperCAmelCase : Optional[int] = classifier_dropout
super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase )
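# [Editor's sketch] Using the de-obfuscated config through the public
# transformers API; assumes a transformers release that ships RoCBert (v4.24+).
try:
    from transformers import RoCBertConfig

    cfg = RoCBertConfig()
    assert cfg.model_type == "roc_bert"
    assert cfg.vocab_size == 30_522  # default from the __init__ above
except ImportError:  # transformers missing or too old
    pass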
| 342 | 1 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class _snake_case ( unittest.TestCase ):
@parameterized.expand([(None,), ("foo.json",)] )
def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: Optional[Any] ) -> str:
__UpperCAmelCase : str = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase , config_name=__lowerCamelCase )
__UpperCAmelCase : Any = GenerationConfig.from_pretrained(__lowerCamelCase , config_name=__lowerCamelCase )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , __lowerCamelCase )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , __lowerCamelCase )
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
__UpperCAmelCase : Optional[int] = AutoConfig.from_pretrained("gpt2" )
__UpperCAmelCase : List[str] = GenerationConfig.from_model_config(__lowerCamelCase )
__UpperCAmelCase : Any = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def _lowerCamelCase ( self: int ) -> List[str]:
__UpperCAmelCase : List[Any] = GenerationConfig()
__UpperCAmelCase : Tuple = {
"max_new_tokens": 10_24,
"foo": "bar",
}
__UpperCAmelCase : Dict = copy.deepcopy(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = generation_config.update(**__lowerCamelCase )
# update_kwargs was not modified (no side effects)
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 10_24 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(__lowerCamelCase , {"foo": "bar"} )
def _lowerCamelCase ( self: int ) -> List[str]:
__UpperCAmelCase : Any = GenerationConfig()
__UpperCAmelCase : Dict = "bar"
with tempfile.TemporaryDirectory("test-generation-config" ) as tmp_dir:
generation_config.save_pretrained(__lowerCamelCase )
__UpperCAmelCase : Tuple = GenerationConfig.from_pretrained(__lowerCamelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , "bar" )
__UpperCAmelCase : List[str] = GenerationConfig.from_model_config(__lowerCamelCase )
assert not hasattr(__lowerCamelCase , "foo" ) # no new kwargs should be initialized if from config
def _lowerCamelCase ( self: Any ) -> str:
__UpperCAmelCase : Optional[Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , __lowerCamelCase )
self.assertEqual(default_config.num_beams , 1 )
__UpperCAmelCase : List[Any] = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , __lowerCamelCase )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase )
__UpperCAmelCase : Tuple = GenerationConfig.from_pretrained(__lowerCamelCase , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , __lowerCamelCase )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class _snake_case ( unittest.TestCase ):
@classmethod
def _lowerCamelCase ( cls: List[str] ) -> str:
__UpperCAmelCase : Optional[int] = TOKEN
HfFolder.save_token(__lowerCamelCase )
@classmethod
def _lowerCamelCase ( cls: List[Any] ) -> Optional[Any]:
try:
delete_repo(token=cls._token , repo_id="test-generation-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org" )
except HTTPError:
pass
def _lowerCamelCase ( self: Optional[int] ) -> str:
__UpperCAmelCase : str = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("test-generation-config" , use_auth_token=self._token )
__UpperCAmelCase : Optional[Any] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-generation-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCamelCase , repo_id="test-generation-config" , push_to_hub=__lowerCamelCase , use_auth_token=self._token )
__UpperCAmelCase : Any = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
def _lowerCamelCase ( self: Optional[int] ) -> List[str]:
__UpperCAmelCase : Dict = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token )
__UpperCAmelCase : Optional[Any] = GenerationConfig.from_pretrained("valid_org/test-generation-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCamelCase , repo_id="valid_org/test-generation-config-org" , push_to_hub=__lowerCamelCase , use_auth_token=self._token )
__UpperCAmelCase : str = GenerationConfig.from_pretrained("valid_org/test-generation-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
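# [Editor's sketch] The save/load round trip the tests above exercise, reduced
# to its minimal form (assumes a transformers version that includes
# GenerationConfig, v4.25+).
import tempfile

from transformers import GenerationConfig

config = GenerationConfig(do_sample=True, temperature=0.7)
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)
    loaded = GenerationConfig.from_pretrained(tmp_dir)
assert loaded.do_sample is True and loaded.temperature == 0.7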
| 342 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def _UpperCamelCase ( snake_case__ ) -> int:
__UpperCAmelCase : int = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
__UpperCAmelCase : int = [144, 192, 240]
__UpperCAmelCase : Optional[Any] = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
__UpperCAmelCase : Optional[Any] = [96, 120, 144]
__UpperCAmelCase : Tuple = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
__UpperCAmelCase : str = [64, 80, 96]
__UpperCAmelCase : Optional[Any] = [16, 16, 24, 48, 64, 80, 320]
__UpperCAmelCase : Tuple = 0.05
__UpperCAmelCase : Dict = 2.0
if mobilevit_name.startswith("deeplabv3_" ):
__UpperCAmelCase : str = 512
__UpperCAmelCase : Any = 16
__UpperCAmelCase : str = 21
__UpperCAmelCase : Union[str, Any] = "pascal-voc-id2label.json"
else:
__UpperCAmelCase : Optional[Any] = 1000
__UpperCAmelCase : int = "imagenet-1k-id2label.json"
__UpperCAmelCase : Dict = "huggingface/label-files"
__UpperCAmelCase : int = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) )
__UpperCAmelCase : Any = {int(k ): v for k, v in idalabel.items()}
__UpperCAmelCase : int = idalabel
__UpperCAmelCase : List[str] = {v: k for k, v in idalabel.items()}
return config
def _UpperCamelCase ( snake_case__, snake_case__=False ) -> Tuple:
for i in range(1, 6 ):
if f'''layer_{i}.''' in name:
__UpperCAmelCase : Tuple = name.replace(f'''layer_{i}.''', f'''encoder.layer.{i - 1}.''' )
if "conv_1." in name:
__UpperCAmelCase : Dict = name.replace("conv_1.", "conv_stem." )
if ".block." in name:
__UpperCAmelCase : Optional[int] = name.replace(".block.", "." )
if "exp_1x1" in name:
__UpperCAmelCase : Tuple = name.replace("exp_1x1", "expand_1x1" )
if "red_1x1" in name:
__UpperCAmelCase : Optional[Any] = name.replace("red_1x1", "reduce_1x1" )
if ".local_rep.conv_3x3." in name:
__UpperCAmelCase : Optional[int] = name.replace(".local_rep.conv_3x3.", ".conv_kxk." )
if ".local_rep.conv_1x1." in name:
__UpperCAmelCase : Any = name.replace(".local_rep.conv_1x1.", ".conv_1x1." )
if ".norm." in name:
__UpperCAmelCase : Dict = name.replace(".norm.", ".normalization." )
if ".conv." in name:
__UpperCAmelCase : List[Any] = name.replace(".conv.", ".convolution." )
if ".conv_proj." in name:
__UpperCAmelCase : List[str] = name.replace(".conv_proj.", ".conv_projection." )
for i in range(0, 2 ):
for j in range(0, 4 ):
if f'''.{i}.{j}.''' in name:
__UpperCAmelCase : List[Any] = name.replace(f'''.{i}.{j}.''', f'''.{i}.layer.{j}.''' )
for i in range(2, 6 ):
for j in range(0, 4 ):
if f'''.{i}.{j}.''' in name:
__UpperCAmelCase : Any = name.replace(f'''.{i}.{j}.''', f'''.{i}.''' )
if "expand_1x1" in name:
__UpperCAmelCase : Optional[int] = name.replace("expand_1x1", "downsampling_layer.expand_1x1" )
if "conv_3x3" in name:
__UpperCAmelCase : List[Any] = name.replace("conv_3x3", "downsampling_layer.conv_3x3" )
if "reduce_1x1" in name:
__UpperCAmelCase : Dict = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1" )
for i in range(2, 5 ):
if f'''.global_rep.{i}.weight''' in name:
__UpperCAmelCase : Any = name.replace(f'''.global_rep.{i}.weight''', ".layernorm.weight" )
if f'''.global_rep.{i}.bias''' in name:
__UpperCAmelCase : Optional[Any] = name.replace(f'''.global_rep.{i}.bias''', ".layernorm.bias" )
if ".global_rep." in name:
__UpperCAmelCase : Tuple = name.replace(".global_rep.", ".transformer." )
if ".pre_norm_mha.0." in name:
__UpperCAmelCase : Optional[Any] = name.replace(".pre_norm_mha.0.", ".layernorm_before." )
if ".pre_norm_mha.1.out_proj." in name:
__UpperCAmelCase : Tuple = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense." )
if ".pre_norm_ffn.0." in name:
__UpperCAmelCase : Optional[Any] = name.replace(".pre_norm_ffn.0.", ".layernorm_after." )
if ".pre_norm_ffn.1." in name:
__UpperCAmelCase : Dict = name.replace(".pre_norm_ffn.1.", ".intermediate.dense." )
if ".pre_norm_ffn.4." in name:
__UpperCAmelCase : int = name.replace(".pre_norm_ffn.4.", ".output.dense." )
if ".transformer." in name:
__UpperCAmelCase : Tuple = name.replace(".transformer.", ".transformer.layer." )
if ".aspp_layer." in name:
__UpperCAmelCase : Any = name.replace(".aspp_layer.", "." )
if ".aspp_pool." in name:
__UpperCAmelCase : Optional[Any] = name.replace(".aspp_pool.", "." )
if "seg_head." in name:
__UpperCAmelCase : Optional[int] = name.replace("seg_head.", "segmentation_head." )
if "segmentation_head.classifier.classifier." in name:
__UpperCAmelCase : str = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier." )
if "classifier.fc." in name:
__UpperCAmelCase : Optional[Any] = name.replace("classifier.fc.", "classifier." )
elif (not base_model) and ("segmentation_head." not in name):
__UpperCAmelCase : List[str] = "mobilevit." + name
return name
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=False ) -> Union[str, Any]:
if base_model:
__UpperCAmelCase : Optional[int] = ""
else:
__UpperCAmelCase : Tuple = "mobilevit."
for key in orig_state_dict.copy().keys():
__UpperCAmelCase : Optional[int] = orig_state_dict.pop(snake_case__ )
if key[:8] == "encoder.":
__UpperCAmelCase : str = key[8:]
if "qkv" in key:
__UpperCAmelCase : Tuple = key.split("." )
__UpperCAmelCase : List[Any] = int(key_split[0][6:] ) - 1
__UpperCAmelCase : Optional[Any] = int(key_split[3] )
__UpperCAmelCase : Tuple = model.get_submodule(f'''{model_prefix}encoder.layer.{layer_num}''' )
__UpperCAmelCase : List[str] = layer.transformer.layer[transformer_num].attention.attention.all_head_size
__UpperCAmelCase : Optional[Any] = (
f'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'''
)
if "weight" in key:
__UpperCAmelCase : Any = val[:dim, :]
__UpperCAmelCase : Any = val[dim : dim * 2, :]
__UpperCAmelCase : List[Any] = val[-dim:, :]
else:
__UpperCAmelCase : List[str] = val[:dim]
__UpperCAmelCase : Optional[Any] = val[dim : dim * 2]
__UpperCAmelCase : List[Any] = val[-dim:]
else:
__UpperCAmelCase : str = val
return orig_state_dict
def _UpperCamelCase ( ) -> Any:
__UpperCAmelCase : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
__UpperCAmelCase : List[str] = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw )
return im
@torch.no_grad()
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__=False ) -> Optional[Any]:
__UpperCAmelCase : Tuple = get_mobilevit_config(snake_case__ )
# load original state_dict
__UpperCAmelCase : str = torch.load(snake_case__, map_location="cpu" )
# load 🤗 model
if mobilevit_name.startswith("deeplabv3_" ):
__UpperCAmelCase : Optional[int] = MobileViTForSemanticSegmentation(snake_case__ ).eval()
else:
__UpperCAmelCase : List[Any] = MobileViTForImageClassification(snake_case__ ).eval()
__UpperCAmelCase : Dict = convert_state_dict(snake_case__, snake_case__ )
model.load_state_dict(snake_case__ )
# Check outputs on an image, prepared by MobileViTImageProcessor
__UpperCAmelCase : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32 )
__UpperCAmelCase : Any = image_processor(images=prepare_img(), return_tensors="pt" )
__UpperCAmelCase : Dict = model(**snake_case__ )
__UpperCAmelCase : Tuple = outputs.logits
if mobilevit_name.startswith("deeplabv3_" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
__UpperCAmelCase : int = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
__UpperCAmelCase : Tuple = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
__UpperCAmelCase : Any = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
] )
else:
raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3, :3, :3], snake_case__, atol=1e-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
__UpperCAmelCase : str = torch.tensor([-0.9866, 0.2392, -1.1241] )
elif mobilevit_name == "mobilevit_xs":
__UpperCAmelCase : Tuple = torch.tensor([-2.4761, -0.9399, -1.9587] )
elif mobilevit_name == "mobilevit_xxs":
__UpperCAmelCase : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653] )
else:
raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3], snake_case__, atol=1e-4 )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(f'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case__ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(snake_case__ )
if push_to_hub:
__UpperCAmelCase : List[str] = {
"mobilevit_s": "mobilevit-small",
"mobilevit_xs": "mobilevit-x-small",
"mobilevit_xxs": "mobilevit-xx-small",
"deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
"deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
"deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
}
print("Pushing to the hub..." )
__UpperCAmelCase : int = model_mapping[mobilevit_name]
image_processor.push_to_hub(snake_case__, organization="apple" )
model.push_to_hub(snake_case__, organization="apple" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_snake_case = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
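# [Editor's sketch] The fused-QKV split performed inside convert_state_dict
# above: one (3*dim, dim) projection matrix is sliced into separate q/k/v
# weights along dim 0.
import torch

dim = 4
qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
assert q.shape == k.shape == v.shape == (dim, dim)
assert torch.equal(torch.cat([q, k, v], dim=0), qkv)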
| 342 | 1 |
from __future__ import annotations
import math
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ ) -> int:
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if len(snake_case__ ) == 0:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1, node_index * 2, snake_case__, snake_case__, snake_case__ ), minimax(depth + 1, node_index * 2 + 1, snake_case__, snake_case__, snake_case__ ), )
return min(
minimax(depth + 1, node_index * 2, snake_case__, snake_case__, snake_case__ ), minimax(depth + 1, node_index * 2 + 1, snake_case__, snake_case__, snake_case__ ), )
def _UpperCamelCase ( ) -> None:
__UpperCAmelCase : int = [90, 23, 6, 33, 21, 65, 123, 3_4423]
__UpperCAmelCase : Optional[Any] = math.log(len(snake_case__ ), 2 )
print("Optimal value : ", end="" )
print(minimax(0, 0, snake_case__, snake_case__, snake_case__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
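# [Editor's sketch] The same minimax recursion in callable form; on the sample
# scores it returns 65, the maximizing player's best guaranteed leaf value.
import math


def minimax_demo(depth, node, maximizing, scores, height):
    if depth == height:
        return scores[node]
    pick = max if maximizing else min
    return pick(
        minimax_demo(depth + 1, node * 2, not maximizing, scores, height),
        minimax_demo(depth + 1, node * 2 + 1, not maximizing, scores, height),
    )


scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
assert minimax_demo(0, 0, True, scores, int(math.log2(len(scores)))) == 65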
| 342 | import math
_snake_case = 10
_snake_case = 7
_snake_case = BALLS_PER_COLOUR * NUM_COLOURS
def _UpperCamelCase ( snake_case__ = 20 ) -> str:
__UpperCAmelCase : Optional[Any] = math.comb(snake_case__, snake_case__ )
__UpperCAmelCase : List[Any] = math.comb(NUM_BALLS - BALLS_PER_COLOUR, snake_case__ )
__UpperCAmelCase : Dict = NUM_COLOURS * (1 - missing_colour / total)
return f'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
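# [Editor's sketch] The expectation above via linearity: each of the 7 colours
# is present unless all 20 picks avoid its 10 balls, so
# E = 7 * (1 - C(50, 20) / C(60, 20)).
from math import comb

expected_colours = 7 * (1 - comb(60 - 10, 20) / comb(60, 20))
assert f"{expected_colours:.9f}" == "6.818741802"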
| 342 | 1 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
_snake_case = [
'''cross_validation.py''',
'''gradient_accumulation.py''',
'''local_sgd.py''',
'''multi_process_metrics.py''',
'''memory.py''',
'''automatic_gradient_accumulation.py''',
'''fsdp_with_peak_mem_tracking.py''',
'''deepspeed_with_config_support.py''',
'''megatron_lm_gpt_pretraining.py''',
]
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: Any , __lowerCamelCase: str , __lowerCamelCase: bool , __lowerCamelCase: str = None , __lowerCamelCase: list = None ) -> str:
__UpperCAmelCase : List[str] = None
__UpperCAmelCase : Any = os.path.abspath(os.path.join("examples" , "by_feature" ) )
__UpperCAmelCase : List[Any] = os.path.abspath("examples" )
for item in os.listdir(__lowerCamelCase ):
if item not in EXCLUDE_EXAMPLES:
__UpperCAmelCase : List[Any] = os.path.join(__lowerCamelCase , __lowerCamelCase )
if os.path.isfile(__lowerCamelCase ) and ".py" in item_path:
with self.subTest(
tested_script=__lowerCamelCase , feature_script=__lowerCamelCase , tested_section="main()" if parser_only else "training_function()" , ):
__UpperCAmelCase : List[Any] = compare_against_test(
os.path.join(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = "\n".join(__lowerCamelCase )
if special_strings is not None:
for string in special_strings:
__UpperCAmelCase : Dict = diff.replace(__lowerCamelCase , "" )
self.assertEqual(__lowerCamelCase , "" )
def _lowerCamelCase ( self: List[Any] ) -> Optional[Any]:
self.one_complete_example("complete_nlp_example.py" , __lowerCamelCase )
self.one_complete_example("complete_nlp_example.py" , __lowerCamelCase )
def _lowerCamelCase ( self: str ) -> int:
__UpperCAmelCase : Union[str, Any] = os.path.abspath(os.path.join("examples" , "cv_example.py" ) )
__UpperCAmelCase : List[str] = [
" " * 16 + "{\n\n",
" " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
" " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
" " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
" " * 20 + "\"epoch\": epoch,\n\n",
" " * 16 + "},\n\n",
" " * 16 + "step=epoch,\n",
" " * 12,
" " * 8 + "for step, batch in enumerate(active_dataloader):\n",
]
self.one_complete_example("complete_cv_example.py" , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
self.one_complete_example("complete_cv_example.py" , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class _snake_case ( _lowercase ):
lowerCamelCase__: Dict = False
@classmethod
def _lowerCamelCase ( cls: Tuple ) -> Any:
super().setUpClass()
__UpperCAmelCase : Optional[Any] = tempfile.mkdtemp()
__UpperCAmelCase : str = os.path.join(cls._tmpdir , "default_config.yml" )
write_basic_config(save_location=cls.configPath )
__UpperCAmelCase : Any = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def _lowerCamelCase ( cls: Tuple ) -> Dict:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def _lowerCamelCase ( self: Union[str, Any] ) -> Any:
__UpperCAmelCase : Tuple = f'''
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "epoch_0" ) ) )
def _lowerCamelCase ( self: List[Any] ) -> Optional[int]:
__UpperCAmelCase : Optional[Any] = f'''
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
'''.split()
__UpperCAmelCase : Optional[Any] = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "step_2" ) ) )
def _lowerCamelCase ( self: Optional[int] ) -> Tuple:
__UpperCAmelCase : Union[str, Any] = f'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}
'''.split()
__UpperCAmelCase : Union[str, Any] = run_command(self._launch_args + testargs , return_stdout=__lowerCamelCase )
self.assertNotIn("epoch 0:" , __lowerCamelCase )
self.assertIn("epoch 1:" , __lowerCamelCase )
def _lowerCamelCase ( self: Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase : Dict = f'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}
'''.split()
__UpperCAmelCase : Any = run_command(self._launch_args + testargs , return_stdout=__lowerCamelCase )
if torch.cuda.is_available():
__UpperCAmelCase : Dict = torch.cuda.device_count()
else:
__UpperCAmelCase : int = 1
if num_processes > 1:
self.assertNotIn("epoch 0:" , __lowerCamelCase )
self.assertIn("epoch 1:" , __lowerCamelCase )
else:
self.assertIn("epoch 0:" , __lowerCamelCase )
self.assertIn("epoch 1:" , __lowerCamelCase )
@slow
def _lowerCamelCase ( self: Optional[Any] ) -> int:
__UpperCAmelCase : Optional[Any] = "\n examples/by_feature/cross_validation.py\n --num_folds 2\n ".split()
with mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "0"} ):
__UpperCAmelCase : Union[str, Any] = run_command(self._launch_args + testargs , return_stdout=__lowerCamelCase )
__UpperCAmelCase : Dict = re.findall("({.+})" , __lowerCamelCase )
__UpperCAmelCase : Any = [r for r in results if "accuracy" in r][-1]
__UpperCAmelCase : Any = ast.literal_eval(__lowerCamelCase )
self.assertGreaterEqual(results["accuracy"] , 0.75 )
def _lowerCamelCase ( self: int ) -> List[Any]:
__UpperCAmelCase : Any = ["examples/by_feature/multi_process_metrics.py"]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowerCamelCase ( self: Any ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
__UpperCAmelCase : int = f'''
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , "tracking" ) ) )
def _lowerCamelCase ( self: Any ) -> Optional[int]:
__UpperCAmelCase : int = ["examples/by_feature/gradient_accumulation.py"]
run_command(self._launch_args + testargs )
def _lowerCamelCase ( self: Any ) -> List[str]:
__UpperCAmelCase : List[Any] = ["examples/by_feature/local_sgd.py"]
run_command(self._launch_args + testargs )
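# [Editor's sketch] The environment-mocking pattern these tests lean on:
# mock.patch.dict temporarily overrides os.environ and restores it on exit
# (the variable name here is made up for the demo).
import os
from unittest import mock

assert "EDITOR_SKETCH_FLAG" not in os.environ
with mock.patch.dict(os.environ, {"EDITOR_SKETCH_FLAG": "1"}):
    assert os.environ["EDITOR_SKETCH_FLAG"] == "1"
assert "EDITOR_SKETCH_FLAG" not in os.environ  # restored after the block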
| 342 | def _UpperCamelCase ( snake_case__ ) -> int:
__UpperCAmelCase : int = [0] * len(snake_case__ )
__UpperCAmelCase : Union[str, Any] = []
__UpperCAmelCase : str = [1] * len(snake_case__ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(snake_case__ ) ):
if indegree[i] == 0:
queue.append(snake_case__ )
while queue:
__UpperCAmelCase : List[str] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
__UpperCAmelCase : str = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(snake_case__ )
print(max(snake_case__ ) )
# Adjacency list of Graph
_snake_case = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
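# [Editor's sketch] The Kahn-style pass above counts vertices along the longest
# path; on this sample DAG the printed answer is 5 (e.g. 0 -> 2 -> 5 -> 6 -> 7).
def longest_path_vertices(g):
    indegree = {u: 0 for u in g}
    for vs in g.values():
        for v in vs:
            indegree[v] += 1
    dist = {u: 1 for u in g}
    queue = [u for u in g if indegree[u] == 0]
    while queue:
        u = queue.pop(0)
        for v in g[u]:
            indegree[v] -= 1
            dist[v] = max(dist[v], dist[u] + 1)
            if indegree[v] == 0:
                queue.append(v)
    return max(dist.values())


assert longest_path_vertices({0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}) == 5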
| 342 | 1 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class _snake_case ( _lowercase ):
lowerCamelCase__: Union[str, Any] = "data2vec-audio"
def __init__( self: str , __lowerCamelCase: int=32 , __lowerCamelCase: int=7_68 , __lowerCamelCase: int=12 , __lowerCamelCase: Any=12 , __lowerCamelCase: Tuple=30_72 , __lowerCamelCase: Optional[int]="gelu" , __lowerCamelCase: Tuple=0.1 , __lowerCamelCase: Optional[Any]=0.1 , __lowerCamelCase: Optional[int]=0.1 , __lowerCamelCase: str=0.0 , __lowerCamelCase: Optional[int]=0.1 , __lowerCamelCase: Dict=0.1 , __lowerCamelCase: Union[str, Any]=0.02 , __lowerCamelCase: str=1e-5 , __lowerCamelCase: Optional[int]="gelu" , __lowerCamelCase: List[Any]=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __lowerCamelCase: Optional[int]=(5, 2, 2, 2, 2, 2, 2) , __lowerCamelCase: Optional[Any]=(10, 3, 3, 3, 3, 2, 2) , __lowerCamelCase: str=False , __lowerCamelCase: Dict=16 , __lowerCamelCase: Union[str, Any]=19 , __lowerCamelCase: List[Any]=5 , __lowerCamelCase: Optional[Any]=0.05 , __lowerCamelCase: str=10 , __lowerCamelCase: Any=2 , __lowerCamelCase: int=0.0 , __lowerCamelCase: List[str]=10 , __lowerCamelCase: Union[str, Any]=0 , __lowerCamelCase: int="sum" , __lowerCamelCase: Optional[Any]=False , __lowerCamelCase: List[Any]=False , __lowerCamelCase: List[str]=2_56 , __lowerCamelCase: str=(5_12, 5_12, 5_12, 5_12, 15_00) , __lowerCamelCase: str=(5, 3, 3, 1, 1) , __lowerCamelCase: int=(1, 2, 3, 1, 1) , __lowerCamelCase: Optional[int]=5_12 , __lowerCamelCase: Optional[Any]=0 , __lowerCamelCase: Any=1 , __lowerCamelCase: Union[str, Any]=2 , __lowerCamelCase: int=False , __lowerCamelCase: int=3 , __lowerCamelCase: int=2 , __lowerCamelCase: List[str]=3 , __lowerCamelCase: int=None , **__lowerCamelCase: Union[str, Any] , ) -> Union[str, Any]:
super().__init__(**__lowerCamelCase , pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : Optional[int] = feat_extract_activation
__UpperCAmelCase : Dict = list(__lowerCamelCase )
__UpperCAmelCase : List[Any] = list(__lowerCamelCase )
__UpperCAmelCase : Tuple = list(__lowerCamelCase )
__UpperCAmelCase : str = conv_bias
__UpperCAmelCase : str = num_conv_pos_embeddings
__UpperCAmelCase : int = num_conv_pos_embedding_groups
__UpperCAmelCase : List[Any] = conv_pos_kernel_size
__UpperCAmelCase : List[str] = len(self.conv_dim )
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : List[str] = intermediate_size
__UpperCAmelCase : Optional[int] = hidden_act
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : Optional[int] = hidden_dropout
__UpperCAmelCase : int = attention_dropout
__UpperCAmelCase : Union[str, Any] = activation_dropout
__UpperCAmelCase : Dict = feat_proj_dropout
__UpperCAmelCase : Tuple = final_dropout
__UpperCAmelCase : List[Any] = layerdrop
__UpperCAmelCase : Union[str, Any] = layer_norm_eps
__UpperCAmelCase : Optional[int] = initializer_range
__UpperCAmelCase : str = vocab_size
__UpperCAmelCase : str = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCAmelCase : Optional[int] = mask_time_prob
__UpperCAmelCase : Optional[int] = mask_time_length
__UpperCAmelCase : Any = mask_time_min_masks
__UpperCAmelCase : List[str] = mask_feature_prob
__UpperCAmelCase : Optional[int] = mask_feature_length
__UpperCAmelCase : Any = mask_feature_min_masks
# ctc loss
__UpperCAmelCase : Dict = ctc_loss_reduction
__UpperCAmelCase : Optional[int] = ctc_zero_infinity
# adapter
__UpperCAmelCase : int = add_adapter
__UpperCAmelCase : List[str] = adapter_kernel_size
__UpperCAmelCase : str = adapter_stride
__UpperCAmelCase : List[str] = num_adapter_layers
__UpperCAmelCase : int = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__UpperCAmelCase : Optional[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__UpperCAmelCase : List[Any] = list(__lowerCamelCase )
__UpperCAmelCase : Dict = list(__lowerCamelCase )
__UpperCAmelCase : int = list(__lowerCamelCase )
__UpperCAmelCase : List[Any] = xvector_output_dim
@property
def _lowerCamelCase ( self: List[str] ) -> Optional[int]:
return math.prod(self.conv_stride )
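# [Editor's sketch] `inputs_to_logits_ratio` above is just the product of the
# conv strides: with the defaults (5, 2, 2, 2, 2, 2, 2) every output frame
# covers 320 input samples, i.e. 20 ms of 16 kHz audio.
import math

assert math.prod((5, 2, 2, 2, 2, 2, 2)) == 320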
| 342 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
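# [Editor's sketch] The idea behind `_LazyModule`, reduced to the module-level
# `__getattr__` hook from PEP 562: nothing is imported until an exported name is
# first touched (illustrative, not the actual transformers implementation).
import importlib

_demo_import_structure = {"json": ["dumps"]}


def __getattr__(name):  # called only when `name` is not found normally
    for module_name, symbols in _demo_import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)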
| 342 | 1 |
from math import ceil
def _UpperCamelCase ( snake_case__, snake_case__ ) -> str:
__UpperCAmelCase : Optional[Any] = list(range(0, snake_case__ ) )
__UpperCAmelCase : Optional[Any] = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
__UpperCAmelCase : str = []
for i in device_map_blocks:
if device_map_blocks.count(snake_case__ ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(snake_case__ )
# Missing blocks
__UpperCAmelCase : int = [i for i in blocks if i not in device_map_blocks]
__UpperCAmelCase : Union[str, Any] = [i for i in device_map_blocks if i not in blocks]
if len(snake_case__ ) != 0:
raise ValueError(
"Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
" These attention blocks were specified more than once: " + str(snake_case__ ) )
if len(snake_case__ ) != 0:
raise ValueError(
"There are attention blocks for this model that are not specified in the device_map. Add these attention "
"blocks to a device on the device_map: " + str(snake_case__ ) )
if len(snake_case__ ) != 0:
raise ValueError(
"The device_map contains more attention blocks than this model has. Remove these from the device_map:"
+ str(snake_case__ ) )
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Optional[int]:
__UpperCAmelCase : Dict = list(range(snake_case__ ) )
__UpperCAmelCase : str = int(ceil(n_layers / len(snake_case__ ) ) )
__UpperCAmelCase : Any = [layers[i : i + n_blocks] for i in range(0, snake_case__, snake_case__ )]
return dict(zip(snake_case__, snake_case__ ) )
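# Illustrative behaviour of the block-splitting helper above: with 12 layers
# spread over 4 devices it assigns 3 consecutive layers per device, e.g.
# {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}.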
| 342 | from __future__ import annotations
from math import pi
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 342 | 1 |
_snake_case = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest released version, comment out the command above and
# uncomment the following command.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_snake_case = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_snake_case = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 342 | import flax.linen as nn
import jax
import jax.numpy as jnp
class _snake_case ( nn.Module ):
lowerCamelCase__: int
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: Tuple ) -> Union[str, Any]:
__UpperCAmelCase : List[str] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Optional[Any] , __lowerCamelCase: Optional[int] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = hidden_states.shape
__UpperCAmelCase : Dict = jax.image.resize(
__lowerCamelCase , shape=(batch, height * 2, width * 2, channels) , method="nearest" , )
__UpperCAmelCase : Dict = self.conv(__lowerCamelCase )
return hidden_states
class _snake_case ( nn.Module ):
lowerCamelCase__: int
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: str ) -> Any:
__UpperCAmelCase : Optional[int] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Dict , __lowerCamelCase: str ) -> List[Any]:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
__UpperCAmelCase : Any = self.conv(__lowerCamelCase )
return hidden_states
class _snake_case ( nn.Module ):
lowerCamelCase__: int
lowerCamelCase__: int = None
lowerCamelCase__: float = 0.0
lowerCamelCase__: bool = None
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: str ) -> List[str]:
__UpperCAmelCase : str = self.in_channels if self.out_channels is None else self.out_channels
__UpperCAmelCase : Dict = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
__UpperCAmelCase : List[str] = nn.Conv(
__lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__UpperCAmelCase : Optional[Any] = nn.Dense(__lowerCamelCase , dtype=self.dtype )
__UpperCAmelCase : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
__UpperCAmelCase : Optional[Any] = nn.Dropout(self.dropout_prob )
__UpperCAmelCase : Tuple = nn.Conv(
__lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__UpperCAmelCase : Optional[int] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
__UpperCAmelCase : List[Any] = None
if use_nin_shortcut:
__UpperCAmelCase : Dict = nn.Conv(
__lowerCamelCase , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , )
def __call__( self: Tuple , __lowerCamelCase: Tuple , __lowerCamelCase: str , __lowerCamelCase: Union[str, Any]=True ) -> List[Any]:
__UpperCAmelCase : Dict = hidden_states
__UpperCAmelCase : int = self.norma(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = nn.swish(__lowerCamelCase )
__UpperCAmelCase : Tuple = self.conva(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.time_emb_proj(nn.swish(__lowerCamelCase ) )
__UpperCAmelCase : List[str] = jnp.expand_dims(jnp.expand_dims(__lowerCamelCase , 1 ) , 1 )
__UpperCAmelCase : List[str] = hidden_states + temb
__UpperCAmelCase : Union[str, Any] = self.norma(__lowerCamelCase )
__UpperCAmelCase : Tuple = nn.swish(__lowerCamelCase )
__UpperCAmelCase : str = self.dropout(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : List[str] = self.conva(__lowerCamelCase )
if self.conv_shortcut is not None:
__UpperCAmelCase : Optional[int] = self.conv_shortcut(__lowerCamelCase )
return hidden_states + residual
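# The three modules above mirror diffusers' FlaxUpsample2D, FlaxDownsample2D and
# FlaxResnetBlock2D. A shape-level sketch under that assumption (NHWC inputs):
#   x of shape (1, 32, 32, C) -> upsample   -> (1, 64, 64, out_channels)  # 2x nearest resize + 3x3 conv
#   x of shape (1, 32, 32, C) -> downsample -> (1, 16, 16, out_channels)  # stride-2 3x3 conv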
| 342 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def _UpperCamelCase ( snake_case__ ) -> Optional[int]:
# initialize config
if "resnet-50" in model_name:
__UpperCAmelCase : int = ResNetConfig.from_pretrained("microsoft/resnet-50" )
elif "resnet-101" in model_name:
__UpperCAmelCase : List[Any] = ResNetConfig.from_pretrained("microsoft/resnet-101" )
else:
raise ValueError("Model name should include either resnet50 or resnet101" )
__UpperCAmelCase : Optional[Any] = DetrConfig(use_timm_backbone=snake_case__, backbone_config=snake_case__ )
# set label attributes
__UpperCAmelCase : Optional[Any] = "panoptic" in model_name
if is_panoptic:
__UpperCAmelCase : Optional[int] = 250
else:
__UpperCAmelCase : Dict = 91
__UpperCAmelCase : Optional[Any] = "huggingface/label-files"
__UpperCAmelCase : Tuple = "coco-detection-id2label.json"
__UpperCAmelCase : List[Any] = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) )
__UpperCAmelCase : List[str] = {int(snake_case__ ): v for k, v in idalabel.items()}
__UpperCAmelCase : str = idalabel
__UpperCAmelCase : Dict = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def _UpperCamelCase ( snake_case__ ) -> Any:
# here we list all keys to be renamed (original name on the left, our name on the right)
__UpperCAmelCase : Optional[Any] = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
f'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
f'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> int:
__UpperCAmelCase : List[Any] = state_dict.pop(snake_case__ )
__UpperCAmelCase : Optional[Any] = val
def _UpperCamelCase ( snake_case__, snake_case__=False ) -> List[Any]:
__UpperCAmelCase : Optional[Any] = ""
if is_panoptic:
__UpperCAmelCase : Dict = "detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__UpperCAmelCase : List[Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
__UpperCAmelCase : int = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
__UpperCAmelCase : int = in_proj_weight[:256, :]
__UpperCAmelCase : Optional[int] = in_proj_bias[:256]
__UpperCAmelCase : Optional[Any] = in_proj_weight[256:512, :]
__UpperCAmelCase : Optional[int] = in_proj_bias[256:512]
__UpperCAmelCase : Optional[Any] = in_proj_weight[-256:, :]
__UpperCAmelCase : Optional[int] = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
__UpperCAmelCase : Tuple = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
__UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
__UpperCAmelCase : Any = in_proj_weight[:256, :]
__UpperCAmelCase : Tuple = in_proj_bias[:256]
__UpperCAmelCase : str = in_proj_weight[256:512, :]
__UpperCAmelCase : Dict = in_proj_bias[256:512]
__UpperCAmelCase : Any = in_proj_weight[-256:, :]
__UpperCAmelCase : str = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
__UpperCAmelCase : Union[str, Any] = state_dict.pop(
f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
__UpperCAmelCase : int = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
__UpperCAmelCase : List[Any] = in_proj_weight_cross_attn[:256, :]
__UpperCAmelCase : Union[str, Any] = in_proj_bias_cross_attn[:256]
__UpperCAmelCase : Optional[Any] = in_proj_weight_cross_attn[256:512, :]
__UpperCAmelCase : Any = in_proj_bias_cross_attn[256:512]
__UpperCAmelCase : Union[str, Any] = in_proj_weight_cross_attn[-256:, :]
__UpperCAmelCase : int = in_proj_bias_cross_attn[-256:]
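    # Note: the slicing above splits PyTorch's fused attention projection
    # (in_proj_weight of shape (3 * 256, 256)) into separate query/key/value
    # matrices of shape (256, 256) each; 256 is DETR's hidden size.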
def _UpperCamelCase ( ) -> Union[str, Any]:
__UpperCAmelCase : str = "http://images.cocodataset.org/val2017/000000039769.jpg"
__UpperCAmelCase : int = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw )
return im
@torch.no_grad()
def _UpperCamelCase ( snake_case__, snake_case__=None, snake_case__=False ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase : Dict = get_detr_config(snake_case__ )
# load original model from torch hub
__UpperCAmelCase : Optional[int] = {
"detr-resnet-50": "detr_resnet50",
"detr-resnet-101": "detr_resnet101",
}
logger.info(f'''Converting model {model_name}...''' )
__UpperCAmelCase : List[Any] = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=snake_case__ ).eval()
__UpperCAmelCase : Tuple = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(snake_case__ ):
if is_panoptic:
__UpperCAmelCase : Dict = "detr." + src
rename_key(snake_case__, snake_case__, snake_case__ )
# query, key and value matrices need special treatment
read_in_q_k_v(snake_case__, is_panoptic=snake_case__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__UpperCAmelCase : Dict = "detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
__UpperCAmelCase : Optional[int] = state_dict.pop(snake_case__ )
__UpperCAmelCase : Any = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
__UpperCAmelCase : int = state_dict.pop(snake_case__ )
__UpperCAmelCase : Union[str, Any] = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
__UpperCAmelCase : List[Any] = state_dict.pop(snake_case__ )
__UpperCAmelCase : Union[str, Any] = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
__UpperCAmelCase : int = state_dict.pop(snake_case__ )
__UpperCAmelCase : Optional[Any] = val
# finally, create HuggingFace model and load state dict
__UpperCAmelCase : Optional[Any] = DetrForSegmentation(snake_case__ ) if is_panoptic else DetrForObjectDetection(snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
# verify our conversion on an image
__UpperCAmelCase : Union[str, Any] = "coco_panoptic" if is_panoptic else "coco_detection"
__UpperCAmelCase : Union[str, Any] = DetrImageProcessor(format=snake_case__ )
__UpperCAmelCase : str = processor(images=prepare_img(), return_tensors="pt" )
__UpperCAmelCase : Optional[Any] = encoding["pixel_values"]
__UpperCAmelCase : List[Any] = detr(snake_case__ )
__UpperCAmelCase : str = model(snake_case__ )
assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3 )
assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
model.save_pretrained(snake_case__ )
processor.save_pretrained(snake_case__ )
if push_to_hub:
# Upload model and image processor to the hub
logger.info("Uploading PyTorch model and image processor to the hub..." )
model.push_to_hub(f'''nielsr/{model_name}''' )
processor.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
_snake_case = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
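# Example invocation (illustrative; script and output paths are placeholders):
#   python convert_detr_checkpoint.py --model_name detr-resnet-50 --pytorch_dump_folder_path ./detr-resnet-50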
| 342 | import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_snake_case = pytest.mark.integration
@require_faiss
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: Union[str, Any] ) -> str:
__UpperCAmelCase : Optional[int] = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(__lowerCamelCase ) for x in np.arange(30 ).tolist()]} )
return dset
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
__UpperCAmelCase : int = dset.map(
lambda __lowerCamelCase , __lowerCamelCase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__lowerCamelCase , keep_in_memory=__lowerCamelCase )
__UpperCAmelCase : Tuple = dset.add_faiss_index("vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT )
__UpperCAmelCase , __UpperCAmelCase : Dict = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def _lowerCamelCase ( self: List[str] ) -> int:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__UpperCAmelCase , __UpperCAmelCase : Tuple = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
__UpperCAmelCase , __UpperCAmelCase : List[Any] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(__lowerCamelCase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def _lowerCamelCase ( self: List[str] ) -> Dict:
from elasticsearch import Elasticsearch
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
__UpperCAmelCase : int = {"acknowledged": True}
            mocked_bulk.return_value = [(True, None)] * 30
__UpperCAmelCase : Dict = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
__UpperCAmelCase : Any = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=__lowerCamelCase )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: List[str] ) -> Optional[int]:
import faiss
__UpperCAmelCase : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
__UpperCAmelCase : Dict = np.zeros(5 , dtype=np.floataa )
__UpperCAmelCase : List[str] = 1
__UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(__lowerCamelCase )
self.assertRaises(__lowerCamelCase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
__UpperCAmelCase : List[str] = np.eye(5 , dtype=np.floataa )[::-1]
__UpperCAmelCase , __UpperCAmelCase : Any = index.search_batch(__lowerCamelCase )
self.assertRaises(__lowerCamelCase , index.search_batch , queries[0] )
__UpperCAmelCase : Dict = [scores[0] for scores in total_scores]
__UpperCAmelCase : int = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , __lowerCamelCase )
def _lowerCamelCase ( self: Any ) -> List[str]:
import faiss
__UpperCAmelCase : Dict = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
__UpperCAmelCase : Optional[Any] = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__lowerCamelCase ):
__UpperCAmelCase : Any = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def _lowerCamelCase ( self: List[str] ) -> Dict:
import faiss
__UpperCAmelCase : str = faiss.IndexFlat(5 )
__UpperCAmelCase : int = FaissIndex(custom_index=__lowerCamelCase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def _lowerCamelCase ( self: Union[str, Any] ) -> int:
import faiss
__UpperCAmelCase : Any = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file:
index.save(tmp_file.name )
__UpperCAmelCase : List[str] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
__UpperCAmelCase : Tuple = np.zeros(5 , dtype=np.floataa )
__UpperCAmelCase : Tuple = 1
__UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search(__lowerCamelCase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def _UpperCamelCase ( snake_case__ ) -> Optional[Any]:
import faiss
__UpperCAmelCase : Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5, dtype=np.floataa ) )
__UpperCAmelCase : Optional[Any] = "index.faiss"
__UpperCAmelCase : Optional[int] = f'''mock://{index_name}'''
index.save(snake_case__, storage_options=mockfs.storage_options )
__UpperCAmelCase : Dict = FaissIndex.load(snake_case__, storage_options=mockfs.storage_options )
__UpperCAmelCase : str = np.zeros(5, dtype=np.floataa )
__UpperCAmelCase : Any = 1
__UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(snake_case__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
__UpperCAmelCase : Optional[Any] = Elasticsearch()
__UpperCAmelCase : Dict = {"acknowledged": True}
__UpperCAmelCase : Any = ElasticSearchIndex(es_client=__lowerCamelCase )
            mocked_bulk.return_value = [(True, None)] * 3
index.add_documents(["foo", "bar", "foobar"] )
# single query
__UpperCAmelCase : Dict = "foo"
__UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = index.search(__lowerCamelCase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
__UpperCAmelCase : int = "foo"
__UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search(__lowerCamelCase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
__UpperCAmelCase : int = ["foo", "bar", "foobar"]
__UpperCAmelCase : Union[str, Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
__UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search_batch(__lowerCamelCase )
__UpperCAmelCase : Tuple = [scores[0] for scores in total_scores]
__UpperCAmelCase : Optional[int] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __lowerCamelCase )
# batched queries with timeout
__UpperCAmelCase : str = ["foo", "bar", "foobar"]
__UpperCAmelCase : Tuple = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search_batch(__lowerCamelCase , request_timeout=30 )
__UpperCAmelCase : Union[str, Any] = [scores[0] for scores in total_scores]
__UpperCAmelCase : List[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __lowerCamelCase )
| 342 | 1 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_snake_case = logging.get_logger(__name__)
@add_end_docstrings(_lowercase )
class _snake_case ( _lowercase ):
def __init__( self: Optional[Any] , *__lowerCamelCase: Tuple , **__lowerCamelCase: Any ) -> Optional[int]:
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: List[Any]=None ) -> int:
__UpperCAmelCase : Any = {}
if top_k is not None:
__UpperCAmelCase : Any = top_k
return {}, {}, postprocess_params
def __call__( self: Tuple , __lowerCamelCase: Union[str, List[str], "Image.Image", List["Image.Image"]] , **__lowerCamelCase: Dict ) -> str:
return super().__call__(__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( self: Any , __lowerCamelCase: Optional[Any] ) -> Tuple:
__UpperCAmelCase : Optional[Any] = load_image(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.image_processor(images=__lowerCamelCase , return_tensors=self.framework )
return model_inputs
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: str ) -> int:
__UpperCAmelCase : Any = self.model(**__lowerCamelCase )
return model_outputs
def _lowerCamelCase ( self: Dict , __lowerCamelCase: Optional[Any] , __lowerCamelCase: List[Any]=5 ) -> List[str]:
if top_k > self.model.config.num_labels:
__UpperCAmelCase : Optional[int] = self.model.config.num_labels
if self.framework == "pt":
__UpperCAmelCase : Optional[int] = model_outputs.logits.softmax(-1 )[0]
__UpperCAmelCase , __UpperCAmelCase : List[Any] = probs.topk(__lowerCamelCase )
elif self.framework == "tf":
__UpperCAmelCase : Union[str, Any] = stable_softmax(model_outputs.logits , axis=-1 )[0]
__UpperCAmelCase : Optional[int] = tf.math.top_k(__lowerCamelCase , k=__lowerCamelCase )
__UpperCAmelCase , __UpperCAmelCase : str = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
__UpperCAmelCase : str = scores.tolist()
__UpperCAmelCase : List[str] = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(__lowerCamelCase , __lowerCamelCase )]
| 342 | import argparse
import struct
import unittest
class _snake_case :
def __init__( self: Tuple , __lowerCamelCase: bytes ) -> None:
__UpperCAmelCase : Tuple = data
# Initialize hash values
__UpperCAmelCase : Any = [
0x6_A_0_9_E_6_6_7,
0xB_B_6_7_A_E_8_5,
0x3_C_6_E_F_3_7_2,
0xA_5_4_F_F_5_3_A,
0x5_1_0_E_5_2_7_F,
0x9_B_0_5_6_8_8_C,
0x1_F_8_3_D_9_A_B,
0x5_B_E_0_C_D_1_9,
]
# Initialize round constants
__UpperCAmelCase : Dict = [
0x4_2_8_A_2_F_9_8,
0x7_1_3_7_4_4_9_1,
0xB_5_C_0_F_B_C_F,
0xE_9_B_5_D_B_A_5,
0x3_9_5_6_C_2_5_B,
0x5_9_F_1_1_1_F_1,
0x9_2_3_F_8_2_A_4,
0xA_B_1_C_5_E_D_5,
0xD_8_0_7_A_A_9_8,
0x1_2_8_3_5_B_0_1,
0x2_4_3_1_8_5_B_E,
0x5_5_0_C_7_D_C_3,
0x7_2_B_E_5_D_7_4,
0x8_0_D_E_B_1_F_E,
0x9_B_D_C_0_6_A_7,
0xC_1_9_B_F_1_7_4,
0xE_4_9_B_6_9_C_1,
0xE_F_B_E_4_7_8_6,
0x0_F_C_1_9_D_C_6,
0x2_4_0_C_A_1_C_C,
0x2_D_E_9_2_C_6_F,
0x4_A_7_4_8_4_A_A,
0x5_C_B_0_A_9_D_C,
0x7_6_F_9_8_8_D_A,
0x9_8_3_E_5_1_5_2,
0xA_8_3_1_C_6_6_D,
0xB_0_0_3_2_7_C_8,
0xB_F_5_9_7_F_C_7,
0xC_6_E_0_0_B_F_3,
0xD_5_A_7_9_1_4_7,
0x0_6_C_A_6_3_5_1,
0x1_4_2_9_2_9_6_7,
0x2_7_B_7_0_A_8_5,
0x2_E_1_B_2_1_3_8,
0x4_D_2_C_6_D_F_C,
0x5_3_3_8_0_D_1_3,
0x6_5_0_A_7_3_5_4,
0x7_6_6_A_0_A_B_B,
0x8_1_C_2_C_9_2_E,
0x9_2_7_2_2_C_8_5,
0xA_2_B_F_E_8_A_1,
0xA_8_1_A_6_6_4_B,
0xC_2_4_B_8_B_7_0,
0xC_7_6_C_5_1_A_3,
0xD_1_9_2_E_8_1_9,
0xD_6_9_9_0_6_2_4,
0xF_4_0_E_3_5_8_5,
0x1_0_6_A_A_0_7_0,
0x1_9_A_4_C_1_1_6,
0x1_E_3_7_6_C_0_8,
0x2_7_4_8_7_7_4_C,
0x3_4_B_0_B_C_B_5,
0x3_9_1_C_0_C_B_3,
0x4_E_D_8_A_A_4_A,
0x5_B_9_C_C_A_4_F,
0x6_8_2_E_6_F_F_3,
0x7_4_8_F_8_2_E_E,
0x7_8_A_5_6_3_6_F,
0x8_4_C_8_7_8_1_4,
0x8_C_C_7_0_2_0_8,
0x9_0_B_E_F_F_F_A,
0xA_4_5_0_6_C_E_B,
0xB_E_F_9_A_3_F_7,
0xC_6_7_1_7_8_F_2,
]
__UpperCAmelCase : List[Any] = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _lowerCamelCase ( __lowerCamelCase: bytes ) -> bytes:
__UpperCAmelCase : List[str] = B"\x80" + (B"\x00" * (63 - (len(__lowerCamelCase ) + 8) % 64))
__UpperCAmelCase : int = struct.pack(">Q" , (len(__lowerCamelCase ) * 8) )
return data + padding + big_endian_integer
def _lowerCamelCase ( self: Dict ) -> None:
# Convert into blocks of 64 bytes
__UpperCAmelCase : Dict = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
__UpperCAmelCase : List[str] = list(struct.unpack(">16L" , __lowerCamelCase ) )
# add 48 0-ed integers
words += [0] * 48
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
__UpperCAmelCase : Union[str, Any] = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
__UpperCAmelCase : str = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
__UpperCAmelCase : Union[str, Any] = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_0_0_0_0_0_0_0_0
# Compression
__UpperCAmelCase : Union[str, Any] = self.ror(__lowerCamelCase , 6 ) ^ self.ror(__lowerCamelCase , 11 ) ^ self.ror(__lowerCamelCase , 25 )
__UpperCAmelCase : Tuple = (e & f) ^ ((~e & 0xF_F_F_F_F_F_F_F) & g)
__UpperCAmelCase : int = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_0_0_0_0_0_0_0_0
__UpperCAmelCase : List[Any] = self.ror(__lowerCamelCase , 2 ) ^ self.ror(__lowerCamelCase , 13 ) ^ self.ror(__lowerCamelCase , 22 )
__UpperCAmelCase : Dict = (a & b) ^ (a & c) ^ (b & c)
__UpperCAmelCase : int = (sa + maj) % 0x1_0_0_0_0_0_0_0_0
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = (
g,
f,
e,
((d + tempa) % 0x1_0_0_0_0_0_0_0_0),
c,
b,
a,
((tempa + tempa) % 0x1_0_0_0_0_0_0_0_0),
)
__UpperCAmelCase : Optional[int] = [a, b, c, d, e, f, g, h]
# Modify final values
__UpperCAmelCase : List[str] = [
((element + mutated_hash_values[index]) % 0x1_0_0_0_0_0_0_0_0)
for index, element in enumerate(self.hashes )
]
__UpperCAmelCase : int = "".join([hex(__lowerCamelCase )[2:].zfill(8 ) for value in self.hashes] )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: int ) -> int:
return 0xF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations)
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: List[Any] ) -> None:
import hashlib
__UpperCAmelCase : Dict = bytes("Test String" , "utf-8" )
self.assertEqual(SHAaaa(__lowerCamelCase ).hash , hashlib.shaaaa(__lowerCamelCase ).hexdigest() )
def _UpperCamelCase ( ) -> None:
import doctest
doctest.testmod()
__UpperCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
parser.add_argument(
"-f", "--file", dest="input_file", help="Hash contents of a file" )
__UpperCAmelCase : List[Any] = parser.parse_args()
__UpperCAmelCase : Optional[int] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file, "rb" ) as f:
__UpperCAmelCase : List[str] = f.read()
else:
__UpperCAmelCase : List[Any] = bytes(snake_case__, "utf-8" )
print(SHAaaa(snake_case__ ).hash )
if __name__ == "__main__":
main()
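# Example invocations (illustrative; the script name is a placeholder):
#   python sha_256.py -s "Hello World!! Welcome to Cryptography"
#   python sha_256.py -f path/to/input.bin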
| 342 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class _snake_case ( _lowercase ):
lowerCamelCase__: Dict = "roc_bert"
def __init__( self: int , __lowerCamelCase: Union[str, Any]=3_05_22 , __lowerCamelCase: int=7_68 , __lowerCamelCase: Any=12 , __lowerCamelCase: int=12 , __lowerCamelCase: Union[str, Any]=30_72 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: Optional[int]=0.1 , __lowerCamelCase: str=0.1 , __lowerCamelCase: Any=5_12 , __lowerCamelCase: Union[str, Any]=2 , __lowerCamelCase: str=0.02 , __lowerCamelCase: int=1e-12 , __lowerCamelCase: str=True , __lowerCamelCase: int=0 , __lowerCamelCase: List[str]="absolute" , __lowerCamelCase: List[Any]=None , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: Dict=7_68 , __lowerCamelCase: Optional[int]=9_10 , __lowerCamelCase: Union[str, Any]=5_12 , __lowerCamelCase: int=2_48_58 , __lowerCamelCase: Optional[int]=True , **__lowerCamelCase: Any , ) -> List[Any]:
__UpperCAmelCase : str = vocab_size
__UpperCAmelCase : Dict = max_position_embeddings
__UpperCAmelCase : Optional[Any] = hidden_size
__UpperCAmelCase : Optional[int] = num_hidden_layers
__UpperCAmelCase : Union[str, Any] = num_attention_heads
__UpperCAmelCase : List[str] = intermediate_size
__UpperCAmelCase : List[Any] = hidden_act
__UpperCAmelCase : List[str] = hidden_dropout_prob
__UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : Optional[Any] = type_vocab_size
__UpperCAmelCase : List[Any] = layer_norm_eps
__UpperCAmelCase : Optional[int] = use_cache
__UpperCAmelCase : Optional[Any] = enable_pronunciation
__UpperCAmelCase : Any = enable_shape
__UpperCAmelCase : Union[str, Any] = pronunciation_embed_dim
__UpperCAmelCase : Optional[Any] = pronunciation_vocab_size
__UpperCAmelCase : Optional[Any] = shape_embed_dim
__UpperCAmelCase : List[Any] = shape_vocab_size
__UpperCAmelCase : int = concat_input
__UpperCAmelCase : int = position_embedding_type
__UpperCAmelCase : Optional[int] = classifier_dropout
super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase )
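# Minimal usage sketch (assumes the standard transformers pairing of this config
# class with RoCBertModel; illustrative only):
#   from transformers import RoCBertConfig, RoCBertModel
#   config = RoCBertConfig()  # defaults mirror weiweishi/roc-bert-base-zh
#   model = RoCBertModel(config)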
| 342 | import numpy as np
import datasets
_snake_case = '''
Compute the Mahalanobis Distance
Mahalanobis distance is the distance between a point and a distribution,
not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
_snake_case = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
_snake_case = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalanobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def _lowerCamelCase ( self: List[str] ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: Union[str, Any] ) -> List[str]:
# convert to numpy arrays
__UpperCAmelCase : int = np.array(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = np.array(__lowerCamelCase )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("Expected `X` to be a 2D vector" )
if len(reference_distribution.shape ) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
# Get mahalanobis distance for each prediction
__UpperCAmelCase : str = X - np.mean(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = np.cov(reference_distribution.T )
try:
__UpperCAmelCase : int = np.linalg.inv(__lowerCamelCase )
except np.linalg.LinAlgError:
__UpperCAmelCase : Optional[int] = np.linalg.pinv(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = np.dot(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Optional[int] = np.dot(__lowerCamelCase , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 342 | 1 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__, snake_case__=True, snake_case__="pt" ) -> int:
__UpperCAmelCase : int = {"add_prefix_space": True} if isinstance(snake_case__, snake_case__ ) and not line.startswith(" " ) else {}
__UpperCAmelCase : Union[str, Any] = padding_side
return tokenizer(
[line], max_length=snake_case__, padding="max_length" if pad_to_max_length else None, truncation=snake_case__, return_tensors=snake_case__, add_special_tokens=snake_case__, **snake_case__, )
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=None, ) -> Union[str, Any]:
__UpperCAmelCase : Tuple = input_ids.ne(snake_case__ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class _snake_case ( _lowercase ):
def __init__( self: Optional[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: str , __lowerCamelCase: int="train" , __lowerCamelCase: Optional[int]=None , __lowerCamelCase: Dict=None , __lowerCamelCase: Optional[Any]=None , __lowerCamelCase: Union[str, Any]="" , ) -> List[str]:
super().__init__()
__UpperCAmelCase : List[Any] = Path(__lowerCamelCase ).joinpath(type_path + ".source" )
__UpperCAmelCase : Optional[int] = Path(__lowerCamelCase ).joinpath(type_path + ".target" )
__UpperCAmelCase : int = self.get_char_lens(self.src_file )
__UpperCAmelCase : str = max_source_length
__UpperCAmelCase : str = max_target_length
assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}'''
__UpperCAmelCase : List[Any] = tokenizer
__UpperCAmelCase : Optional[Any] = prefix
if n_obs is not None:
__UpperCAmelCase : Optional[Any] = self.src_lens[:n_obs]
__UpperCAmelCase : Dict = src_lang
__UpperCAmelCase : Optional[int] = tgt_lang
def __len__( self: Optional[int] ) -> Tuple:
return len(self.src_lens )
def __getitem__( self: Optional[int] , __lowerCamelCase: Dict ) -> Dict[str, torch.Tensor]:
__UpperCAmelCase : List[Any] = index + 1 # linecache starts at 1
__UpperCAmelCase : int = self.prefix + linecache.getline(str(self.src_file ) , __lowerCamelCase ).rstrip("\n" )
__UpperCAmelCase : List[str] = linecache.getline(str(self.tgt_file ) , __lowerCamelCase ).rstrip("\n" )
assert source_line, f'''empty source line for index {index}'''
assert tgt_line, f'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer , __lowerCamelCase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
__UpperCAmelCase : Optional[int] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , __lowerCamelCase ) else self.tokenizer
)
__UpperCAmelCase : Optional[Any] = self.tokenizer.generator if isinstance(self.tokenizer , __lowerCamelCase ) else self.tokenizer
__UpperCAmelCase : Dict = encode_line(__lowerCamelCase , __lowerCamelCase , self.max_source_length , "right" )
__UpperCAmelCase : Any = encode_line(__lowerCamelCase , __lowerCamelCase , self.max_target_length , "right" )
__UpperCAmelCase : Union[str, Any] = source_inputs["input_ids"].squeeze()
__UpperCAmelCase : Union[str, Any] = target_inputs["input_ids"].squeeze()
__UpperCAmelCase : int = source_inputs["attention_mask"].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _lowerCamelCase ( __lowerCamelCase: Tuple ) -> Optional[Any]:
return [len(__lowerCamelCase ) for x in Path(__lowerCamelCase ).open().readlines()]
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: str ) -> Dict[str, torch.Tensor]:
__UpperCAmelCase : str = torch.stack([x["input_ids"] for x in batch] )
__UpperCAmelCase : int = torch.stack([x["attention_mask"] for x in batch] )
__UpperCAmelCase : Tuple = torch.stack([x["decoder_input_ids"] for x in batch] )
__UpperCAmelCase : Any = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , __lowerCamelCase )
else self.tokenizer.pad_token_id
)
__UpperCAmelCase : List[Any] = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , __lowerCamelCase )
else self.tokenizer.pad_token_id
)
__UpperCAmelCase : Tuple = trim_batch(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase , __UpperCAmelCase : Dict = trim_batch(__lowerCamelCase , __lowerCamelCase , attention_mask=__lowerCamelCase )
__UpperCAmelCase : str = {
"input_ids": source_ids,
"attention_mask": source_mask,
"decoder_input_ids": y,
}
return batch
_snake_case = getLogger(__name__)
def _UpperCamelCase ( snake_case__ ) -> Dict:
return list(itertools.chain.from_iterable(snake_case__ ) )
def _UpperCamelCase ( snake_case__ ) -> None:
__UpperCAmelCase : Union[str, Any] = get_git_info()
save_json(snake_case__, os.path.join(snake_case__, "git_log.json" ) )
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=4, **snake_case__ ) -> int:
with open(snake_case__, "w" ) as f:
json.dump(snake_case__, snake_case__, indent=snake_case__, **snake_case__ )
def _UpperCamelCase ( snake_case__ ) -> List[str]:
with open(snake_case__ ) as f:
return json.load(snake_case__ )
def _UpperCamelCase ( ) -> Dict:
__UpperCAmelCase : Any = git.Repo(search_parent_directories=snake_case__ )
__UpperCAmelCase : Optional[Any] = {
"repo_id": str(snake_case__ ),
"repo_sha": str(repo.head.object.hexsha ),
"repo_branch": str(repo.active_branch ),
"hostname": str(socket.gethostname() ),
}
return repo_infos
def _UpperCamelCase ( snake_case__, snake_case__ ) -> List:
return list(map(snake_case__, snake_case__ ) )
def _UpperCamelCase ( snake_case__, snake_case__ ) -> str:
with open(snake_case__, "wb" ) as f:
return pickle.dump(snake_case__, snake_case__ )
def _UpperCamelCase ( snake_case__ ) -> Optional[int]:
def remove_articles(snake_case__ ):
return re.sub(r"\b(a|an|the)\b", " ", snake_case__ )
def white_space_fix(snake_case__ ):
return " ".join(text.split() )
def remove_punc(snake_case__ ):
__UpperCAmelCase : str = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(snake_case__ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(snake_case__ ) ) ) )
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Optional[int]:
__UpperCAmelCase : Any = normalize_answer(snake_case__ ).split()
__UpperCAmelCase : int = normalize_answer(snake_case__ ).split()
__UpperCAmelCase : Any = Counter(snake_case__ ) & Counter(snake_case__ )
__UpperCAmelCase : Optional[int] = sum(common.values() )
if num_same == 0:
return 0
__UpperCAmelCase : Optional[Any] = 1.0 * num_same / len(snake_case__ )
__UpperCAmelCase : Tuple = 1.0 * num_same / len(snake_case__ )
__UpperCAmelCase : Any = (2 * precision * recall) / (precision + recall)
return fa
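# Worked example (illustrative): normalization drops articles and punctuation, so
# "the cat sat" vs "a cat sat" scores token-F1 1.0, while "the cat" vs
# "a cat sat" gives precision 1.0, recall 0.5 and F1 ~= 0.67.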
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Union[str, Any]:
return normalize_answer(snake_case__ ) == normalize_answer(snake_case__ )
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Dict:
assert len(snake_case__ ) == len(snake_case__ )
__UpperCAmelCase : List[Any] = 0
for hypo, pred in zip(snake_case__, snake_case__ ):
em += exact_match_score(snake_case__, snake_case__ )
if len(snake_case__ ) > 0:
em /= len(snake_case__ )
return {"em": em}
def _UpperCamelCase ( snake_case__ ) -> Any:
return model_prefix.startswith("rag" )
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Dict:
__UpperCAmelCase : Union[str, Any] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
__UpperCAmelCase : Union[str, Any] = "dropout_rate"
for p in extra_params:
if getattr(snake_case__, snake_case__, snake_case__ ):
if not hasattr(snake_case__, snake_case__ ) and not hasattr(snake_case__, equivalent_param[p] ):
logger.info("config doesn't have a `{}` attribute".format(snake_case__ ) )
delattr(snake_case__, snake_case__ )
continue
__UpperCAmelCase : Dict = p if hasattr(snake_case__, snake_case__ ) else equivalent_param[p]
setattr(snake_case__, snake_case__, getattr(snake_case__, snake_case__ ) )
delattr(snake_case__, snake_case__ )
return hparams, config
| 342 | import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class _snake_case ( unittest.TestCase ):
def __init__( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict=13 , __lowerCamelCase: List[str]=7 , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: int=True , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Tuple=99 , __lowerCamelCase: List[str]=32 , __lowerCamelCase: Optional[Any]=5 , __lowerCamelCase: List[str]=4 , __lowerCamelCase: str=37 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: int=0.1 , __lowerCamelCase: Optional[Any]=0.1 , __lowerCamelCase: Tuple=5_12 , __lowerCamelCase: int=16 , __lowerCamelCase: str=2 , __lowerCamelCase: Optional[Any]=0.02 , __lowerCamelCase: Optional[Any]=4 , ) -> str:
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : Optional[int] = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[str] = use_attention_mask
__UpperCAmelCase : Dict = use_token_type_ids
__UpperCAmelCase : Optional[int] = use_labels
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : Dict = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : Tuple = intermediate_size
__UpperCAmelCase : Union[str, Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : str = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = max_position_embeddings
__UpperCAmelCase : Optional[int] = type_vocab_size
__UpperCAmelCase : str = type_sequence_label_size
__UpperCAmelCase : Tuple = initializer_range
__UpperCAmelCase : str = num_choices
def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
__UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : str = None
if self.use_attention_mask:
__UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : Any = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__lowerCamelCase , )
return config, input_ids, attention_mask
def _lowerCamelCase ( self: str ) -> Any:
__UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = config_and_inputs
__UpperCAmelCase : Any = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: str = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def _lowerCamelCase ( self: List[Any] ) -> Dict:
__UpperCAmelCase : Union[str, Any] = FlaxDistilBertModelTester(self )
@slow
def _lowerCamelCase ( self: Tuple ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
__UpperCAmelCase : Optional[int] = model_class_name.from_pretrained("distilbert-base-uncased" )
__UpperCAmelCase : Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(__lowerCamelCase )
@require_flax
class _snake_case ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self: int ) -> List[Any]:
__UpperCAmelCase : Dict = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased" )
__UpperCAmelCase : Any = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__UpperCAmelCase : Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__UpperCAmelCase : int = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
__UpperCAmelCase : str = (1, 11, 7_68)
self.assertEqual(output.shape , __lowerCamelCase )
__UpperCAmelCase : Optional[int] = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1e-4 ) )
| 342 | 1 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
    '''The `inpainting.py` script is outdated. Please import the pipeline directly via'''
    ''' `from diffusers import StableDiffusionInpaintPipeline` instead.'''
)
| 342 | import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
_snake_case = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
_snake_case = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
_snake_case = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
_snake_case = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
_snake_case = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Any:
for tf_name, hf_name in patterns:
__UpperCAmelCase : Optional[int] = k.replace(snake_case__, snake_case__ )
return k
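# Illustration of the sequential rewriting above (the key is a representative TF-checkpoint
# name, not one read from a real file): applying DECODER_PATTERNS to
# "pegasus/decoder/layer_0/attention/self/query/kernel" yields, step by step,
# "model.decoder.layers.0.self_attn.q_proj.weight" via '/' -> '.', 'layer_' -> 'layers.',
# 'kernel' -> 'weight', 'pegasus' -> 'model', 'attention.self' -> 'self_attn' and
# 'query' -> 'q_proj'.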
def _UpperCamelCase ( snake_case__, snake_case__ ) -> BigBirdPegasusForConditionalGeneration:
__UpperCAmelCase : Dict = BigBirdPegasusConfig(**snake_case__ )
__UpperCAmelCase : Dict = BigBirdPegasusForConditionalGeneration(snake_case__ )
__UpperCAmelCase : Optional[Any] = torch_model.state_dict()
__UpperCAmelCase : Optional[int] = {}
# separating decoder weights
__UpperCAmelCase : List[Any] = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder" )}
__UpperCAmelCase : str = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder" )}
for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion" ):
__UpperCAmelCase : Optional[int] = [k.endswith(snake_case__ ) for ending in KEYS_TO_IGNORE]
if any(snake_case__ ):
continue
__UpperCAmelCase : List[str] = DECODER_PATTERNS
__UpperCAmelCase : str = rename_state_dict_key(snake_case__, snake_case__ )
if new_k not in state_dict:
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
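        # TF stores dense/attention kernels as (in_features, out_features) while
        # torch.nn.Linear expects (out_features, in_features), hence the transpose below.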
        if any(i in k for i in ["dense", "query", "key", "value"] ):
__UpperCAmelCase : Optional[int] = v.T
__UpperCAmelCase : str = torch.from_numpy(snake_case__ )
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion" ):
__UpperCAmelCase : int = [k.endswith(snake_case__ ) for ending in KEYS_TO_IGNORE]
if any(snake_case__ ):
continue
__UpperCAmelCase : Optional[Any] = REMAINING_PATTERNS
__UpperCAmelCase : Optional[int] = rename_state_dict_key(snake_case__, snake_case__ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if any(i in k for i in ["dense", "query", "key", "value"] ):
__UpperCAmelCase : List[Any] = v.T
__UpperCAmelCase : List[str] = torch.from_numpy(snake_case__ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
__UpperCAmelCase : List[Any] = mapping["model.embed_positions.weight"]
__UpperCAmelCase : Optional[Any] = mapping.pop("model.embed_positions.weight" )
__UpperCAmelCase , __UpperCAmelCase : Any = torch_model.load_state_dict(snake_case__, strict=snake_case__ )
__UpperCAmelCase : str = [
k
for k in missing
if k
not in [
"final_logits_bias",
"model.encoder.embed_tokens.weight",
"model.decoder.embed_tokens.weight",
"lm_head.weight",
]
]
assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], f'''no matches found for the following tf keys {extra}'''
return torch_model
def _UpperCamelCase ( snake_case__ ) -> Dict:
__UpperCAmelCase : Tuple = tf.train.list_variables(snake_case__ )
__UpperCAmelCase : List[str] = {}
__UpperCAmelCase : str = ["global_step"]
for name, shape in tqdm(snake_case__, desc="converting tf checkpoint to dict" ):
__UpperCAmelCase : Tuple = any(pat in name for pat in ignore_name )
if skip_key:
continue
__UpperCAmelCase : Optional[Any] = tf.train.load_variable(snake_case__, snake_case__ )
__UpperCAmelCase : Tuple = array
return tf_weights
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Dict:
__UpperCAmelCase : str = get_tf_weights_as_numpy(snake_case__ )
__UpperCAmelCase : List[Any] = convert_bigbird_pegasus(snake_case__, snake_case__ )
torch_model.save_pretrained(snake_case__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
_snake_case = parser.parse_args()
_snake_case = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
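# Example invocation (the script filename and paths below are assumed placeholders, not
# values taken from this file):
#
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path /path/to/bigbird_pegasus/model.ckpt \
#       --save_dir ./bigbird-pegasus-converted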
| 342 | 1 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: Optional[int] = XLMRobertaTokenizer
lowerCamelCase__: str = XLMRobertaTokenizerFast
lowerCamelCase__: List[str] = True
lowerCamelCase__: str = True
def _lowerCamelCase ( self: int ) -> Union[str, Any]:
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCAmelCase : Tuple = XLMRobertaTokenizer(__lowerCamelCase , keep_accents=__lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self: Dict ) -> Any:
__UpperCAmelCase : Tuple = "<pad>"
__UpperCAmelCase : str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase )
def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
__UpperCAmelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(__lowerCamelCase ) , 10_02 )
def _lowerCamelCase ( self: Tuple ) -> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : str = XLMRobertaTokenizer(__lowerCamelCase , keep_accents=__lowerCamelCase )
__UpperCAmelCase : Any = tokenizer.tokenize("This is a test" )
self.assertListEqual(__lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__UpperCAmelCase : Optional[int] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
__UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
__UpperCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def _lowerCamelCase ( self: List[str] ) -> List[str]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__UpperCAmelCase : int = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
__UpperCAmelCase : Dict = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
__UpperCAmelCase : str = tempfile.mkdtemp()
__UpperCAmelCase : str = tokenizer_r.save_pretrained(__lowerCamelCase )
__UpperCAmelCase : Tuple = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
__UpperCAmelCase : Tuple = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(__lowerCamelCase , __lowerCamelCase )
# Checks everything loads correctly in the same way
__UpperCAmelCase : Union[str, Any] = tokenizer_r.from_pretrained(__lowerCamelCase )
__UpperCAmelCase : str = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__lowerCamelCase )
# Save tokenizer rust, legacy_format=True
__UpperCAmelCase : Optional[Any] = tempfile.mkdtemp()
__UpperCAmelCase : Optional[int] = tokenizer_r.save_pretrained(__lowerCamelCase , legacy_format=__lowerCamelCase )
__UpperCAmelCase : str = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it save with the same files
self.assertSequenceEqual(__lowerCamelCase , __lowerCamelCase )
# Checks everything loads correctly in the same way
__UpperCAmelCase : List[str] = tokenizer_r.from_pretrained(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) )
shutil.rmtree(__lowerCamelCase )
# Save tokenizer rust, legacy_format=False
__UpperCAmelCase : int = tempfile.mkdtemp()
__UpperCAmelCase : Any = tokenizer_r.save_pretrained(__lowerCamelCase , legacy_format=__lowerCamelCase )
__UpperCAmelCase : Dict = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__UpperCAmelCase : Any = tokenizer_r.from_pretrained(__lowerCamelCase )
__UpperCAmelCase : str = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) )
shutil.rmtree(__lowerCamelCase )
@cached_property
def _lowerCamelCase ( self: str ) -> str:
return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base" )
def _lowerCamelCase ( self: Union[str, Any] ) -> str:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__lowerCamelCase , f.name )
__UpperCAmelCase : Any = XLMRobertaTokenizer(f.name , keep_accents=__lowerCamelCase )
__UpperCAmelCase : Optional[int] = pickle.dumps(__lowerCamelCase )
pickle.loads(__lowerCamelCase )
def _lowerCamelCase ( self: Dict ) -> Any:
if not self.test_rust_tokenizer:
return
__UpperCAmelCase : str = self.get_tokenizer()
__UpperCAmelCase : Any = self.get_rust_tokenizer()
__UpperCAmelCase : str = "I was born in 92000, and this is falsé."
__UpperCAmelCase : int = tokenizer.tokenize(__lowerCamelCase )
__UpperCAmelCase : List[str] = rust_tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Any = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : int = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : List[Any] = self.get_rust_tokenizer()
__UpperCAmelCase : Optional[Any] = tokenizer.encode(__lowerCamelCase )
__UpperCAmelCase : Dict = rust_tokenizer.encode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
@slow
def _lowerCamelCase ( self: Optional[int] ) -> Tuple:
__UpperCAmelCase : Dict = "Hello World!"
__UpperCAmelCase : Union[str, Any] = [0, 3_53_78, 66_61, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowerCamelCase , self.big_tokenizer.encode(__lowerCamelCase ) )
@slow
def _lowerCamelCase ( self: Tuple ) -> Any:
__UpperCAmelCase : Tuple = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
__UpperCAmelCase : List[Any] = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowerCamelCase , self.big_tokenizer.encode(__lowerCamelCase ) )
@slow
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
# fmt: off
__UpperCAmelCase : Optional[Any] = {"input_ids": [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name="xlm-roberta-base" , revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3" , )
| 342 | import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class _snake_case ( _lowercase ):
lowerCamelCase__: Any = ["image_processor", "tokenizer"]
lowerCamelCase__: Optional[Any] = "BlipImageProcessor"
lowerCamelCase__: Optional[int] = "AutoTokenizer"
def __init__( self: List[str] , __lowerCamelCase: str , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[Any] ) -> Dict:
super().__init__(__lowerCamelCase , __lowerCamelCase )
# add QFormer tokenizer
__UpperCAmelCase : Dict = qformer_tokenizer
def __call__( self: Any , __lowerCamelCase: ImageInput = None , __lowerCamelCase: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCamelCase: bool = True , __lowerCamelCase: Union[bool, str, PaddingStrategy] = False , __lowerCamelCase: Union[bool, str, TruncationStrategy] = None , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: int = 0 , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = True , __lowerCamelCase: Optional[Union[str, TensorType]] = None , **__lowerCamelCase: Dict , ) -> BatchFeature:
if images is None and text is None:
raise ValueError("You have to specify at least images or text." )
__UpperCAmelCase : str = BatchFeature()
if text is not None:
__UpperCAmelCase : Any = self.tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
encoding.update(__lowerCamelCase )
__UpperCAmelCase : Dict = self.qformer_tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
__UpperCAmelCase : int = qformer_text_encoding.pop("input_ids" )
__UpperCAmelCase : Optional[int] = qformer_text_encoding.pop("attention_mask" )
if images is not None:
__UpperCAmelCase : Union[str, Any] = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase )
encoding.update(__lowerCamelCase )
return encoding
def _lowerCamelCase ( self: Any , *__lowerCamelCase: Any , **__lowerCamelCase: Any ) -> Optional[Any]:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( self: Tuple , *__lowerCamelCase: Any , **__lowerCamelCase: Dict ) -> Tuple:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _lowerCamelCase ( self: List[str] ) -> Tuple:
__UpperCAmelCase : str = self.tokenizer.model_input_names
__UpperCAmelCase : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Union[str, Any] , **__lowerCamelCase: Optional[Any] ) -> str:
if os.path.isfile(__lowerCamelCase ):
raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
__UpperCAmelCase : List[str] = os.path.join(__lowerCamelCase , "qformer_tokenizer" )
self.qformer_tokenizer.save_pretrained(__lowerCamelCase )
return super().save_pretrained(__lowerCamelCase , **__lowerCamelCase )
@classmethod
def _lowerCamelCase ( cls: Tuple , __lowerCamelCase: Tuple , **__lowerCamelCase: Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(__lowerCamelCase , subfolder="qformer_tokenizer" )
__UpperCAmelCase : List[Any] = cls._get_arguments_from_pretrained(__lowerCamelCase , **__lowerCamelCase )
args.append(__lowerCamelCase )
return cls(*__lowerCamelCase )
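# Note on the round trip above: save_pretrained() writes the QFormer tokenizer into a
# "qformer_tokenizer" subfolder of the target directory, and the classmethod reads it back
# from that same subfolder before reassembling the processor, so the extra tokenizer
# survives serialization even though the class-level attribute list only tracks the image
# processor and the main tokenizer.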
| 342 | 1 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {'''vocab_file''': '''vocab.json'''}
_snake_case = {
'''vocab_file''': {
'''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''',
}
}
_snake_case = {'''mgp-str''': 27}
class _snake_case ( _lowercase ):
lowerCamelCase__: int = VOCAB_FILES_NAMES
lowerCamelCase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self: Any , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict="[GO]" , __lowerCamelCase: Union[str, Any]="[GO]" , __lowerCamelCase: int="[s]" , __lowerCamelCase: List[str]="[GO]" , **__lowerCamelCase: Tuple ) -> List[Any]:
super().__init__(
unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , pad_token=__lowerCamelCase , **__lowerCamelCase , )
with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
__UpperCAmelCase : str = json.load(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = {v: k for k, v in self.vocab.items()}
@property
def _lowerCamelCase ( self: Dict ) -> Tuple:
return len(self.vocab )
def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
return dict(self.vocab , **self.added_tokens_encoder )
    def _lowerCamelCase ( self: Optional[int] , text: Optional[int] ) -> Optional[Any]:
        char_tokens = []
        for s in text:
            char_tokens.extend(s )
return char_tokens
def _lowerCamelCase ( self: Any , __lowerCamelCase: Dict ) -> Optional[int]:
return self.vocab.get(__lowerCamelCase , self.vocab.get(self.unk_token ) )
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Tuple ) -> int:
return self.decoder.get(__lowerCamelCase )
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error("Vocabulary path ({}) should be a directory".format(__lowerCamelCase ) )
return
__UpperCAmelCase : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" )
return (vocab_file,)
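# Note: the tokenizer above is character-level -- _tokenize splits the input string into
# individual characters, each character is looked up in vocab.json, and unknown characters
# fall back to the unk token ("[GO]").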
| 342 | import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
_snake_case = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
_snake_case = {'''facebook/blenderbot-3B''': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _UpperCamelCase ( ) -> Dict:
__UpperCAmelCase : Tuple = (
list(range(ord("!" ), ord("~" ) + 1 ) ) + list(range(ord("¡" ), ord("¬" ) + 1 ) ) + list(range(ord("®" ), ord("ÿ" ) + 1 ) )
)
__UpperCAmelCase : str = bs[:]
__UpperCAmelCase : Any = 0
for b in range(2**8 ):
if b not in bs:
bs.append(snake_case__ )
cs.append(2**8 + n )
n += 1
__UpperCAmelCase : Optional[Any] = [chr(snake_case__ ) for n in cs]
return dict(zip(snake_case__, snake_case__ ) )
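# A quick illustration of the mapping above (it mirrors the GPT-2 byte-to-unicode scheme):
# printable bytes map to themselves, e.g. bytes_to_unicode()[ord("A")] == "A", while
# non-printable bytes are shifted past 255, e.g. the space byte 32 maps to chr(256 + 32),
# i.e. "Ġ" -- which is why BPE vocabularies show "Ġ" where a leading space used to be.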
def _UpperCamelCase ( snake_case__ ) -> Any:
__UpperCAmelCase : List[Any] = set()
__UpperCAmelCase : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__UpperCAmelCase : Union[str, Any] = char
return pairs
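# Example of the helper above: get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")} -- every adjacent symbol pair, which is
# exactly what the BPE merge loop below ranks and merges.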
class _snake_case ( _lowercase ):
lowerCamelCase__: str = VOCAB_FILES_NAMES
lowerCamelCase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__: Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__: Dict = ["input_ids", "attention_mask"]
def __init__( self: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]="replace" , __lowerCamelCase: List[str]="<s>" , __lowerCamelCase: List[str]="</s>" , __lowerCamelCase: str="</s>" , __lowerCamelCase: Tuple="<s>" , __lowerCamelCase: Optional[int]="<unk>" , __lowerCamelCase: Any="<pad>" , __lowerCamelCase: List[str]="<mask>" , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int , ) -> List[str]:
__UpperCAmelCase : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
__UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
__UpperCAmelCase : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
__UpperCAmelCase : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
__UpperCAmelCase : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
__UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
        # The mask token behaves like a normal word, i.e. it includes the space before it
__UpperCAmelCase : Dict = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , )
with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
__UpperCAmelCase : List[Any] = json.load(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = {v: k for k, v in self.encoder.items()}
__UpperCAmelCase : Dict = errors # how to handle errors in decoding
__UpperCAmelCase : Optional[int] = bytes_to_unicode()
__UpperCAmelCase : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle:
__UpperCAmelCase : List[Any] = merges_handle.read().split("\n" )[1:-1]
__UpperCAmelCase : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
__UpperCAmelCase : int = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
__UpperCAmelCase : List[Any] = {}
__UpperCAmelCase : Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__UpperCAmelCase : int = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def _lowerCamelCase ( self: Dict ) -> Any:
return len(self.encoder )
def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
return dict(self.encoder , **self.added_tokens_encoder )
def _lowerCamelCase ( self: int , __lowerCamelCase: List[Any] ) -> Union[str, Any]:
if token in self.cache:
return self.cache[token]
__UpperCAmelCase : List[Any] = tuple(__lowerCamelCase )
__UpperCAmelCase : Dict = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
__UpperCAmelCase : Optional[int] = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = bigram
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : str = 0
while i < len(__lowerCamelCase ):
try:
__UpperCAmelCase : Union[str, Any] = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCAmelCase : Union[str, Any] = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCAmelCase : List[Any] = tuple(__lowerCamelCase )
__UpperCAmelCase : str = new_word
if len(__lowerCamelCase ) == 1:
break
else:
__UpperCAmelCase : Optional[Any] = get_pairs(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = " ".join(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = word
return word
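    # Worked example of the merge loop above (illustrative ranks, not the real Blenderbot
    # merge table): with bpe_ranks = {("h", "e"): 0, ("l", "l"): 1, ("he", "ll"): 2,
    # ("hell", "o"): 3}, the word "hello" evolves as
    # h e l l o -> he l l o -> he ll o -> hell o -> hello.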
def _lowerCamelCase ( self: Dict , __lowerCamelCase: Optional[Any] ) -> Dict:
__UpperCAmelCase : Any = []
for token in re.findall(self.pat , __lowerCamelCase ):
__UpperCAmelCase : int = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) )
return bpe_tokens
def _lowerCamelCase ( self: int , __lowerCamelCase: str ) -> Dict:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[Any] ) -> List[str]:
return self.decoder.get(__lowerCamelCase )
def _lowerCamelCase ( self: Any , __lowerCamelCase: Any ) -> int:
__UpperCAmelCase : Dict = "".join(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCAmelCase : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__UpperCAmelCase : Dict = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" )
__UpperCAmelCase : Optional[Any] = 0
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
__UpperCAmelCase : Optional[Any] = token_index
writer.write(" ".join(__lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def _lowerCamelCase ( self: Dict , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None , __lowerCamelCase: bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase : int = [self.sep_token_id]
__UpperCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCamelCase ( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int ) -> List[Any]:
__UpperCAmelCase : Optional[Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()):
__UpperCAmelCase : Optional[Any] = " " + text
return (text, kwargs)
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[str]:
return token_ids_a + [self.eos_token_id]
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: "Conversation" ) -> List[int]:
__UpperCAmelCase : Tuple = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to prefix a space, as is done inside Blenderbot
inputs.append(" " + text )
else:
                # Generated responses already contain the space prefix.
inputs.append(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = " ".join(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.encode(__lowerCamelCase )
if len(__lowerCamelCase ) > self.model_max_length:
__UpperCAmelCase : List[Any] = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
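# Minimal usage sketch (the class above corresponds to Blenderbot's tokenizer in the
# upstream source, and the sketch assumes the "facebook/blenderbot-3B" checkpoint from the
# pretrained maps above is reachable):
#
#   tok = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
#   ids = tok("Hello world")["input_ids"]  # ends with tok.eos_token_id; no BOS is prepended
#   text = tok.decode(ids, skip_special_tokens=True)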
| 342 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
_snake_case = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
_snake_case = {
'''google/rembert''': 256,
}
_snake_case = '''▁'''
class _snake_case ( _lowercase ):
lowerCamelCase__: List[str] = VOCAB_FILES_NAMES
lowerCamelCase__: Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__: Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__: int = RemBertTokenizer
def __init__( self: str , __lowerCamelCase: List[str]=None , __lowerCamelCase: Optional[int]=None , __lowerCamelCase: Any=True , __lowerCamelCase: Any=True , __lowerCamelCase: Any=False , __lowerCamelCase: Tuple="[CLS]" , __lowerCamelCase: str="[SEP]" , __lowerCamelCase: Tuple="<unk>" , __lowerCamelCase: Dict="[SEP]" , __lowerCamelCase: int="<pad>" , __lowerCamelCase: List[str]="[CLS]" , __lowerCamelCase: Optional[int]="[MASK]" , **__lowerCamelCase: Union[str, Any] , ) -> Tuple:
        # The mask token behaves like a normal word, i.e. it includes the space before it
__UpperCAmelCase : Union[str, Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , do_lower_case=__lowerCamelCase , remove_space=__lowerCamelCase , keep_accents=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , )
__UpperCAmelCase : List[Any] = do_lower_case
__UpperCAmelCase : Tuple = remove_space
__UpperCAmelCase : int = keep_accents
__UpperCAmelCase : List[Any] = vocab_file
__UpperCAmelCase : Optional[int] = False if not self.vocab_file else True
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase : Tuple = [self.sep_token_id]
__UpperCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _lowerCamelCase ( self: str , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None , __lowerCamelCase: bool = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1]
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase : str = [self.sep_token_id]
__UpperCAmelCase : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCamelCase ( self: int , __lowerCamelCase: str , __lowerCamelCase: Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error("Vocabulary path ({}) should be a directory".format(__lowerCamelCase ) )
return
__UpperCAmelCase : str = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file , __lowerCamelCase )
return (out_vocab_file,)
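# Minimal usage sketch (the class above corresponds to RemBERT's fast tokenizer in the
# upstream source, and the sketch assumes the "google/rembert" checkpoint from the maps
# above is reachable):
#
#   tok = RemBertTokenizerFast.from_pretrained("google/rembert")
#   enc = tok("first segment", "second segment")
#   # input_ids follow [CLS] A [SEP] B [SEP]; token_type_ids are 0 for segment A and 1 for B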
| 342 | import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: List[Any] = CanineTokenizer
lowerCamelCase__: Optional[int] = False
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
super().setUp()
__UpperCAmelCase : Tuple = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowerCamelCase ( self: Union[str, Any] ) -> List[Any]:
return CanineTokenizer.from_pretrained("google/canine-s" )
def _lowerCamelCase ( self: Any , **__lowerCamelCase: List[Any] ) -> CanineTokenizer:
__UpperCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
__UpperCAmelCase : Optional[int] = 10_24
return tokenizer
@require_torch
def _lowerCamelCase ( self: List[str] ) -> int:
__UpperCAmelCase : Union[str, Any] = self.canine_tokenizer
__UpperCAmelCase : List[str] = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
__UpperCAmelCase : Dict = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
# fmt: on
__UpperCAmelCase : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Optional[Any] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
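        # Note: CANINE operates directly on Unicode code points, so the expected ids above
        # are literal ordinals -- 57344 (0xE000) and 57345 (0xE001) are private-use-area
        # codepoints used as CLS and SEP, and 0 is the padding id.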
@require_torch
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
__UpperCAmelCase : Optional[Any] = self.canine_tokenizer
__UpperCAmelCase : Dict = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
__UpperCAmelCase : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , __lowerCamelCase )
self.assertIn("attention_mask" , __lowerCamelCase )
self.assertIn("token_type_ids" , __lowerCamelCase )
@require_torch
def _lowerCamelCase ( self: Any ) -> List[str]:
__UpperCAmelCase : Optional[Any] = self.canine_tokenizer
__UpperCAmelCase : int = [
"What's the weater?",
"It's about 25 degrees.",
]
__UpperCAmelCase : List[Any] = tokenizer(
text_target=__lowerCamelCase , max_length=32 , padding="max_length" , truncation=__lowerCamelCase , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
# safety check on max_len default value so we are sure the test works
__UpperCAmelCase : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__UpperCAmelCase : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCAmelCase : int = tempfile.mkdtemp()
__UpperCAmelCase : List[Any] = " He is very happy, UNwant\u00E9d,running"
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
__UpperCAmelCase : Tuple = tokenizer.__class__.from_pretrained(__lowerCamelCase )
__UpperCAmelCase : Dict = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
shutil.rmtree(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCAmelCase : List[Any] = tempfile.mkdtemp()
__UpperCAmelCase : Optional[int] = " He is very happy, UNwant\u00E9d,running"
__UpperCAmelCase : str = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
__UpperCAmelCase : Tuple = chr(0xE_0_0_7 )
additional_special_tokens.append(__lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
__UpperCAmelCase : Optional[int] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
__UpperCAmelCase : str = tokenizer.__class__.from_pretrained(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertIn(__lowerCamelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__UpperCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(__lowerCamelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__lowerCamelCase )
def _lowerCamelCase ( self: str ) -> Optional[int]:
__UpperCAmelCase : List[Any] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.get_clean_sequence(__lowerCamelCase )
# a special token for Canine can be defined as follows:
__UpperCAmelCase : int = 0xE_0_0_5
__UpperCAmelCase : Tuple = chr(__lowerCamelCase )
tokenizer.add_special_tokens({"cls_token": special_token} )
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , 1 )
__UpperCAmelCase : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : Dict = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : int = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , input_encoded + special_token_id )
__UpperCAmelCase : Optional[int] = tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
self.assertTrue(special_token not in decoded )
def _lowerCamelCase ( self: Optional[int] ) -> Optional[Any]:
__UpperCAmelCase : List[str] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : Optional[int] = chr(0xE_0_0_5 )
__UpperCAmelCase : List[str] = chr(0xE_0_0_6 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__lowerCamelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
__UpperCAmelCase : Tuple = tokenizer.tokenize(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , 1 )
self.assertEqual(len(__lowerCamelCase ) , 1 )
self.assertEqual(token_a[0] , __lowerCamelCase )
self.assertEqual(token_a[0] , __lowerCamelCase )
@require_tokenizers
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
__UpperCAmelCase : Any = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
__UpperCAmelCase : Union[str, Any] = 0xE_0_0_6
__UpperCAmelCase : int = chr(__lowerCamelCase )
__UpperCAmelCase : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__lowerCamelCase )
tokenizer.from_pretrained(__lowerCamelCase )
def _lowerCamelCase ( self: Dict ) -> List[str]:
__UpperCAmelCase : str = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
__UpperCAmelCase : Tuple = json.load(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
__UpperCAmelCase : Optional[int] = json.load(__lowerCamelCase )
# a special token for Canine can be defined as follows:
__UpperCAmelCase : Any = 0xE_0_0_6
__UpperCAmelCase : Union[str, Any] = chr(__lowerCamelCase )
__UpperCAmelCase : Dict = [new_token_a]
__UpperCAmelCase : int = [new_token_a]
with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__UpperCAmelCase : List[str] = tokenizer_class.from_pretrained(__lowerCamelCase , extra_ids=0 )
self.assertIn(__lowerCamelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
__UpperCAmelCase : List[Any] = 0xE_0_0_7
__UpperCAmelCase : List[Any] = chr(__lowerCamelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__UpperCAmelCase : str = [AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase )]
__UpperCAmelCase : Dict = tokenizer_class.from_pretrained(
__lowerCamelCase , additional_special_tokens=__lowerCamelCase , extra_ids=0 )
self.assertIn(__lowerCamelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : Optional[int] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : int = "hello world"
if self.space_between_special_tokens:
__UpperCAmelCase : Any = "[CLS] hello world [SEP]"
else:
__UpperCAmelCase : Union[str, Any] = input
__UpperCAmelCase : List[Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : Any = tokenizer.decode(__lowerCamelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__lowerCamelCase , [output, output.lower()] )
def _lowerCamelCase ( self: Dict ) -> Any:
__UpperCAmelCase : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : List[str] = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
__UpperCAmelCase : List[str] = "a"
__UpperCAmelCase : Any = ord(__lowerCamelCase )
for attr in attributes_list:
setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase )
setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase )
setattr(__lowerCamelCase , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [] )
__UpperCAmelCase : Tuple = 0xE_0_0_6
__UpperCAmelCase : Optional[Any] = chr(__lowerCamelCase )
setattr(__lowerCamelCase , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
pass
def _lowerCamelCase ( self: Any ) -> Any:
pass
def _lowerCamelCase ( self: Union[str, Any] ) -> Tuple:
pass
def _lowerCamelCase ( self: Optional[int] ) -> Any:
pass
def _lowerCamelCase ( self: List[str] ) -> str:
pass
def _lowerCamelCase ( self: Union[str, Any] ) -> Optional[int]:
pass
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
pass
def _lowerCamelCase ( self: str ) -> Tuple:
pass
| 342 | 1 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class _snake_case :
    def __init__( self , a=2 , b=3 , length=64 , seed=None ):
        rng = np.random.default_rng(seed )
        self.length = length
        self.x = rng.normal(size=(length,) ).astype(np.float32 )
        self.y = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.float32 )
    def __len__( self ):
        return self.length
    def __getitem__( self , i ):
        return {"x": self.x[i], "y": self.y[i]}
class _snake_case ( torch.nn.Module ):
def __init__( self: int , __lowerCamelCase: List[Any]=0 , __lowerCamelCase: Optional[int]=0 , __lowerCamelCase: Tuple=False ) -> int:
super().__init__()
__UpperCAmelCase : List[Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
__UpperCAmelCase : Dict = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
__UpperCAmelCase : Any = True
def _lowerCamelCase ( self: Dict , __lowerCamelCase: Optional[int]=None ) -> int:
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
__UpperCAmelCase : Optional[int] = False
return x * self.a[0] + self.b[0]
class _snake_case ( torch.nn.Module ):
def __init__( self: Optional[int] , __lowerCamelCase: str=0 , __lowerCamelCase: Tuple=0 , __lowerCamelCase: Optional[int]=False ) -> Any:
super().__init__()
__UpperCAmelCase : Tuple = torch.nn.Parameter(torch.tensor(__lowerCamelCase ).float() )
__UpperCAmelCase : Optional[int] = torch.nn.Parameter(torch.tensor(__lowerCamelCase ).float() )
__UpperCAmelCase : Tuple = True
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Union[str, Any]=None ) -> Any:
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
__UpperCAmelCase : List[Any] = False
return x * self.a + self.b
def get_dataloaders(accelerator, batch_size=16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length")
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"], )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
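
# --- Usage sketch (added): the helpers above are meant to be driven by an `Accelerator`.
# The csv fixture paths hardcoded in `get_dataloaders` must exist for this to run.
def _demo_prepare():
    from accelerate import Accelerator

    accelerator = Accelerator()
    train_dl, eval_dl = get_dataloaders(accelerator)
    model = RegressionModel(a=1, b=1)
    # `prepare` wraps model and dataloaders for the current distributed setup
    return accelerator.prepare(model, train_dl, eval_dl)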
| 342 | import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """Logging adapter that, by default, emits records on the main process only."""

    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name, log_level=None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
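
# --- Usage sketch (added): how the adapter above is typically driven. Initialize the
# accelerate state first; everything else is standard usage of `get_logger` defined above.
def _demo_logging():
    from accelerate import Accelerator

    Accelerator()  # initializes PartialState so the adapter can inspect process ranks
    logger = get_logger(__name__, log_level="INFO")
    logger.info("printed once, on the main process")
    logger.info("printed on every rank, in rank order", main_process_only=False, in_order=True)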
| 342 | 1 |
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return sigmoid(value); with deriv=True, return the derivative given value=sigmoid(x)."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight with the delta rule; return the final prediction scaled to %."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input('''Expected value: '''))
    number_propagations = int(input('''Number of propagations: '''))
    print(forward_propagation(expected, number_propagations))
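
# --- Added note: the loop above is a single-weight perceptron trained with the delta rule.
# One update step written out (names mirror the function above; not called automatically):
def _demo_single_step(weight: float = 1.0, target: float = 0.5) -> float:
    layer_1 = sigmoid_function(INITIAL_VALUE * weight)                    # forward pass
    layer_1_delta = (target - layer_1) * sigmoid_function(layer_1, True)  # error * sigmoid'
    return weight + INITIAL_VALUE * layer_1_delta                         # gradient step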
| 342 | from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _snake_case ( _lowercase ):
def __init__( self: Optional[Any] , __lowerCamelCase: NestedDataStructureLike[PathLike] , __lowerCamelCase: Optional[NamedSplit] = None , __lowerCamelCase: Optional[Features] = None , __lowerCamelCase: str = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: Optional[int] = None , **__lowerCamelCase: Tuple , ) -> str:
super().__init__(
__lowerCamelCase , split=__lowerCamelCase , features=__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase , streaming=__lowerCamelCase , num_proc=__lowerCamelCase , **__lowerCamelCase , )
__UpperCAmelCase : Union[str, Any] = path_or_paths if isinstance(__lowerCamelCase , __lowerCamelCase ) else {self.split: path_or_paths}
__UpperCAmelCase : int = Text(
cache_dir=__lowerCamelCase , data_files=__lowerCamelCase , features=__lowerCamelCase , **__lowerCamelCase , )
def _lowerCamelCase ( self: List[Any] ) -> Optional[Any]:
# Build iterable dataset
if self.streaming:
__UpperCAmelCase : List[str] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__UpperCAmelCase : Any = None
__UpperCAmelCase : Any = None
__UpperCAmelCase : Dict = None
__UpperCAmelCase : str = None
self.builder.download_and_prepare(
download_config=__lowerCamelCase , download_mode=__lowerCamelCase , verification_mode=__lowerCamelCase , base_path=__lowerCamelCase , num_proc=self.num_proc , )
__UpperCAmelCase : Dict = self.builder.as_dataset(
split=self.split , verification_mode=__lowerCamelCase , in_memory=self.keep_in_memory )
return dataset
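
# --- Usage sketch (added): the reader above is what backs the high-level "text" loader.
# Equivalent public API (the file path is illustrative):
def _demo_load_text():
    from datasets import load_dataset

    return load_dataset("text", data_files={"train": "my_corpus.txt"}, split="train")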
| 342 | 1 |
def mf_knapsack(i, wt, val, j):
    """Memoized (top-down) 0/1 knapsack over the global dp table `f`."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack; returns the optimal value and the filled dp table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w, wt, val):
    """Solve the knapsack and also reconstruct one optimal subset of item indices."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples" )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print('''optimal_value = ''', optimal_solution)
    print('''An optimal subset corresponding to the optimal value''', optimal_subset)
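
# --- Added check (illustrative): the table-filling `knapsack` above is O(n*w) time and
# space; a brute-force sweep over all 2^n subsets agrees with the asserted optimum of 8.
def _brute_force_knapsack(w, wt, val):
    from itertools import combinations

    best = 0
    for r in range(len(wt) + 1):
        for subset in combinations(range(len(wt)), r):
            if sum(wt[i] for i in subset) <= w:
                best = max(best, sum(val[i] for i in subset))
    return best


assert _brute_force_knapsack(6, [4, 3, 2, 3], [3, 2, 4, 4]) == 8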
| 342 | from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
'''configuration_trajectory_transformer''': [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TrajectoryTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrajectoryTransformerModel''',
'''TrajectoryTransformerPreTrainedModel''',
'''load_tf_weights_in_trajectory_transformer''',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 342 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
'''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
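
# --- Added note: both import shims above follow the same transformers convention:
# `_import_structure` maps submodules to exported names, the TYPE_CHECKING branch keeps
# static analysis working, and at runtime the module is replaced by a `_LazyModule` so the
# heavy torch import only happens on first attribute access. Toy sketch of the mechanism
# (illustrative only; not the real `_LazyModule` class):
import importlib
import types


class _LazySketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._symbol_to_submodule = {s: m for m, syms in import_structure.items() for s in syms}

    def __getattr__(self, symbol):
        # Resolve the owning submodule lazily, then fetch the symbol from it
        submodule = importlib.import_module("." + self._symbol_to_submodule[symbol], self.__name__)
        return getattr(submodule, symbol)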
| 342 | import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
def __init__( self: Tuple , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any]=13 , __lowerCamelCase: Optional[int]=32 , __lowerCamelCase: List[str]=3 , __lowerCamelCase: Dict=4 , __lowerCamelCase: Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase: int=[2, 2, 3, 2] , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Tuple=37 , __lowerCamelCase: Tuple="gelu" , __lowerCamelCase: List[Any]=10 , __lowerCamelCase: Optional[int]=0.02 , __lowerCamelCase: Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase: Optional[int]=[2, 3, 4] , __lowerCamelCase: int=None , ) -> List[str]:
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : Optional[int] = image_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : Union[str, Any] = num_stages
__UpperCAmelCase : List[str] = hidden_sizes
__UpperCAmelCase : Any = depths
__UpperCAmelCase : Optional[int] = is_training
__UpperCAmelCase : List[Any] = use_labels
__UpperCAmelCase : Optional[int] = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Union[str, Any] = num_labels
__UpperCAmelCase : Any = initializer_range
__UpperCAmelCase : List[str] = out_features
__UpperCAmelCase : Tuple = out_indices
__UpperCAmelCase : List[Any] = scope
def _lowerCamelCase ( self: List[Any] ) -> Optional[int]:
__UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : List[str] = None
if self.use_labels:
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self: Tuple ) -> List[Any]:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase : Optional[Any] = ConvNextVaModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : List[str] = model(__lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: Tuple ) -> Tuple:
__UpperCAmelCase : Union[str, Any] = ConvNextVaForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Optional[int] = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self: int , __lowerCamelCase: Any , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Any = model(__lowerCamelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__UpperCAmelCase : List[Any] = None
__UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Any = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowerCamelCase ( self: int ) -> List[str]:
__UpperCAmelCase : int = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs
__UpperCAmelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs
__UpperCAmelCase : Dict = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class _snake_case ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase__: Dict = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__: str = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__: Tuple = False
lowerCamelCase__: int = False
lowerCamelCase__: Dict = False
lowerCamelCase__: int = False
lowerCamelCase__: Any = False
def _lowerCamelCase ( self: Union[str, Any] ) -> Union[str, Any]:
__UpperCAmelCase : Union[str, Any] = ConvNextVaModelTester(self )
__UpperCAmelCase : str = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def _lowerCamelCase ( self: Dict ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self: List[Any] ) -> int:
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def _lowerCamelCase ( self: Any ) -> Any:
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def _lowerCamelCase ( self: str ) -> Optional[Any]:
pass
def _lowerCamelCase ( self: List[Any] ) -> int:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_with_labels()
__UpperCAmelCase : Optional[Any] = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES ),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
            ]:
continue
__UpperCAmelCase : Optional[Any] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
__UpperCAmelCase : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
__UpperCAmelCase : Any = model(**__lowerCamelCase ).loss
loss.backward()
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = True
if (
model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
or not model_class.supports_gradient_checkpointing
):
continue
__UpperCAmelCase : int = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.gradient_checkpointing_enable()
model.train()
__UpperCAmelCase : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
__UpperCAmelCase : Any = model(**__lowerCamelCase ).loss
loss.backward()
def _lowerCamelCase ( self: List[str] ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : str = model_class(__lowerCamelCase )
__UpperCAmelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : List[Any] = [*signature.parameters.keys()]
__UpperCAmelCase : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _lowerCamelCase ( self: str ) -> List[Any]:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowerCamelCase ( self: Union[str, Any] ) -> Dict:
def check_hidden_states_output(__lowerCamelCase: Any , __lowerCamelCase: Tuple , __lowerCamelCase: str ):
__UpperCAmelCase : Any = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Tuple = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
__UpperCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[int] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Any = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def _lowerCamelCase ( self: Dict ) -> List[Any]:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Optional[int] = ConvNextVaModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def _UpperCamelCase ( ) -> List[Any]:
__UpperCAmelCase : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
__UpperCAmelCase : List[Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCamelCase )
__UpperCAmelCase : List[str] = self.default_image_processor
__UpperCAmelCase : Optional[Any] = prepare_img()
__UpperCAmelCase : int = preprocessor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
__UpperCAmelCase : str = model(**__lowerCamelCase )
# verify the logits
__UpperCAmelCase : Dict = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
__UpperCAmelCase : str = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
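
# --- Usage sketch (added): the integration test above, reduced to plain inference code.
# The checkpoint name comes from the test; class names are the public transformers exports.
def _demo_convnextv2_inference(image):
    import torch
    from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

    processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
    model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
    with torch.no_grad():
        logits = model(**processor(images=image, return_tensors="pt")).logits
    return model.config.id2label[int(logits.argmax(-1))]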
| 342 | 1 |
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if no already-placed queen attacks square (row, column)."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
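
# --- Added check (illustrative): for a 4x4 board the backtracking above finds exactly two
# solutions, the classic sanity check for N-queens implementations. Not called automatically.
def _count_solutions(queens: int) -> int:
    solution.clear()
    small_board = [[0 for _ in range(queens)] for _ in range(queens)]
    solve(small_board, 0)  # also prints each board it finds
    return len(solution)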
| 342 | import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
_snake_case = {
    '''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _snake_case ( _lowercase ):
lowerCamelCase__: str = "detr"
lowerCamelCase__: Dict = ["past_key_values"]
lowerCamelCase__: str = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self: List[str] , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Any=None , __lowerCamelCase: Dict=3 , __lowerCamelCase: str=1_00 , __lowerCamelCase: Union[str, Any]=6 , __lowerCamelCase: Union[str, Any]=20_48 , __lowerCamelCase: Dict=8 , __lowerCamelCase: Optional[int]=6 , __lowerCamelCase: List[Any]=20_48 , __lowerCamelCase: int=8 , __lowerCamelCase: Tuple=0.0 , __lowerCamelCase: Dict=0.0 , __lowerCamelCase: Any=True , __lowerCamelCase: Tuple="relu" , __lowerCamelCase: Tuple=2_56 , __lowerCamelCase: Dict=0.1 , __lowerCamelCase: Union[str, Any]=0.0 , __lowerCamelCase: Optional[int]=0.0 , __lowerCamelCase: Union[str, Any]=0.02 , __lowerCamelCase: str=1.0 , __lowerCamelCase: List[str]=False , __lowerCamelCase: Dict="sine" , __lowerCamelCase: Optional[int]="resnet50" , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: int=False , __lowerCamelCase: Union[str, Any]=1 , __lowerCamelCase: Tuple=5 , __lowerCamelCase: int=2 , __lowerCamelCase: Dict=1 , __lowerCamelCase: Dict=1 , __lowerCamelCase: Union[str, Any]=5 , __lowerCamelCase: Dict=2 , __lowerCamelCase: int=0.1 , **__lowerCamelCase: str , ) -> int:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__UpperCAmelCase : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : List[Any] = backbone_config.get("model_type" )
__UpperCAmelCase : List[str] = CONFIG_MAPPING[backbone_model_type]
__UpperCAmelCase : List[str] = config_class.from_dict(__lowerCamelCase )
# set timm attributes to None
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = None, None, None
__UpperCAmelCase : Any = use_timm_backbone
__UpperCAmelCase : Optional[Any] = backbone_config
__UpperCAmelCase : Optional[Any] = num_channels
__UpperCAmelCase : List[Any] = num_queries
__UpperCAmelCase : Optional[int] = d_model
__UpperCAmelCase : Optional[Any] = encoder_ffn_dim
__UpperCAmelCase : Dict = encoder_layers
__UpperCAmelCase : List[Any] = encoder_attention_heads
__UpperCAmelCase : int = decoder_ffn_dim
__UpperCAmelCase : Tuple = decoder_layers
__UpperCAmelCase : int = decoder_attention_heads
__UpperCAmelCase : List[Any] = dropout
__UpperCAmelCase : Dict = attention_dropout
__UpperCAmelCase : Optional[Any] = activation_dropout
__UpperCAmelCase : int = activation_function
__UpperCAmelCase : Any = init_std
__UpperCAmelCase : str = init_xavier_std
__UpperCAmelCase : int = encoder_layerdrop
__UpperCAmelCase : Tuple = decoder_layerdrop
__UpperCAmelCase : List[Any] = encoder_layers
__UpperCAmelCase : Optional[Any] = auxiliary_loss
__UpperCAmelCase : int = position_embedding_type
__UpperCAmelCase : Optional[int] = backbone
__UpperCAmelCase : str = use_pretrained_backbone
__UpperCAmelCase : Dict = dilation
# Hungarian matcher
__UpperCAmelCase : Optional[int] = class_cost
__UpperCAmelCase : Optional[Any] = bbox_cost
__UpperCAmelCase : Optional[int] = giou_cost
# Loss coefficients
__UpperCAmelCase : Any = mask_loss_coefficient
__UpperCAmelCase : Any = dice_loss_coefficient
__UpperCAmelCase : Any = bbox_loss_coefficient
__UpperCAmelCase : Optional[int] = giou_loss_coefficient
__UpperCAmelCase : Optional[Any] = eos_coefficient
super().__init__(is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase )
@property
def _lowerCamelCase ( self: Dict ) -> int:
return self.encoder_attention_heads
@property
def _lowerCamelCase ( self: str ) -> int:
return self.d_model
@classmethod
def _lowerCamelCase ( cls: Optional[int] , __lowerCamelCase: PretrainedConfig , **__lowerCamelCase: List[Any] ) -> List[Any]:
return cls(backbone_config=__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( self: str ) -> Dict[str, any]:
__UpperCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
__UpperCAmelCase : int = self.backbone_config.to_dict()
__UpperCAmelCase : List[str] = self.__class__.model_type
return output
class _snake_case ( _lowercase ):
lowerCamelCase__: Optional[int] = version.parse("1.11" )
@property
def _lowerCamelCase ( self: Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _lowerCamelCase ( self: Optional[Any] ) -> float:
return 1e-5
@property
def _lowerCamelCase ( self: List[str] ) -> int:
return 12
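
# --- Usage sketch (added): constructing and round-tripping the config above via its public
# transformers name, `DetrConfig`.
def _demo_detr_config():
    from transformers import DetrConfig

    config = DetrConfig(num_queries=50, d_model=256)
    assert config.hidden_size == config.d_model  # alias wired through attribute_map above
    return config.to_dict()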
| 342 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the command line options for the TPU launch helper."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8)." )
    # positional
    parser.add_argument(
        "training_script", type=str, help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ), )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER )
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )

    # Patch sys.argv so the training script sees its own arguments plus the core count
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores )


if __name__ == "__main__":
    main()
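# --- Usage note (added): the launcher above is invoked from the command line, e.g.
#   python xla_spawn.py --num_cores 8 path/to/train_script.py --per_device_batch_size 8
# (file name and training-script flags are illustrative; everything after the training
# script path is forwarded to it verbatim via the patched sys.argv).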
| 342 | from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class _snake_case ( nn.Module ):
lowerCamelCase__: CLIPConfig
    lowerCamelCase__: jnp.dtype = jnp.float32
def _lowerCamelCase ( self: Any ) -> Tuple:
__UpperCAmelCase : List[str] = FlaxCLIPVisionModule(self.config.vision_config )
__UpperCAmelCase : Any = nn.Dense(self.config.projection_dim , use_bias=__lowerCamelCase , dtype=self.dtype )
__UpperCAmelCase : int = self.param("concept_embeds" , jax.nn.initializers.ones , (17, self.config.projection_dim) )
__UpperCAmelCase : int = self.param(
"special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim) )
__UpperCAmelCase : Tuple = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (17,) )
__UpperCAmelCase : str = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,) )
def __call__( self: List[Any] , __lowerCamelCase: Dict ) -> Dict:
__UpperCAmelCase : Optional[int] = self.vision_model(__lowerCamelCase )[1]
__UpperCAmelCase : List[str] = self.visual_projection(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = jax_cosine_distance(__lowerCamelCase , self.special_care_embeds )
__UpperCAmelCase : Optional[Any] = jax_cosine_distance(__lowerCamelCase , self.concept_embeds )
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign image inputs
__UpperCAmelCase : List[str] = 0.0
__UpperCAmelCase : Tuple = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
__UpperCAmelCase : List[str] = jnp.round(__lowerCamelCase , 3 )
__UpperCAmelCase : Any = jnp.any(special_scores > 0 , axis=1 , keepdims=__lowerCamelCase )
# Use a lower threshold if an image has any special care concept
__UpperCAmelCase : List[Any] = is_special_care * 0.01
__UpperCAmelCase : Any = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
__UpperCAmelCase : List[str] = jnp.round(__lowerCamelCase , 3 )
__UpperCAmelCase : Any = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class _snake_case ( _lowercase ):
lowerCamelCase__: int = CLIPConfig
lowerCamelCase__: Tuple = "clip_input"
lowerCamelCase__: str = FlaxStableDiffusionSafetyCheckerModule
    def __init__(self, config: CLIPConfig, input_shape: Optional[Tuple] = None, seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
def _lowerCamelCase ( self: Dict , __lowerCamelCase: jax.random.KeyArray , __lowerCamelCase: Tuple , __lowerCamelCase: FrozenDict = None ) -> FrozenDict:
# init input tensor
__UpperCAmelCase : Tuple = jax.random.normal(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase , __UpperCAmelCase : Dict = jax.random.split(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = {"params": params_rng, "dropout": dropout_rng}
__UpperCAmelCase : str = self.module.init(__lowerCamelCase , __lowerCamelCase )["params"]
return random_params
    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))
        return self.module.apply(
            {"params": params or self.params}, jnp.array(clip_input, dtype=jnp.float32), rngs={})
| 342 | 1 |
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
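
# --- Added check (illustrative): two small sanity cases. The routine returns *a* longest
# non-decreasing subsequence, so only lengths are asserted here. Not called automatically.
def _check_longest_subsequence() -> None:
    assert len(longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])) == 6
    assert len(longest_subsequence([5, 4, 3, 2, 1])) == 1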
| 342 | import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def _UpperCamelCase ( snake_case__ ) -> Tuple:
__UpperCAmelCase : Union[str, Any] = 384
if "tiny" in model_name:
__UpperCAmelCase : Union[str, Any] = [3, 3, 9, 3]
__UpperCAmelCase : List[Any] = [96, 192, 384, 768]
if "small" in model_name:
__UpperCAmelCase : Tuple = [3, 3, 27, 3]
__UpperCAmelCase : Any = [96, 192, 384, 768]
if "base" in model_name:
__UpperCAmelCase : str = [3, 3, 27, 3]
__UpperCAmelCase : str = [128, 256, 512, 1024]
__UpperCAmelCase : str = 512
if "large" in model_name:
__UpperCAmelCase : Dict = [3, 3, 27, 3]
__UpperCAmelCase : int = [192, 384, 768, 1536]
__UpperCAmelCase : Dict = 768
if "xlarge" in model_name:
__UpperCAmelCase : List[Any] = [3, 3, 27, 3]
__UpperCAmelCase : Tuple = [256, 512, 1024, 2048]
__UpperCAmelCase : int = 1024
# set label information
__UpperCAmelCase : List[Any] = 150
__UpperCAmelCase : str = "huggingface/label-files"
__UpperCAmelCase : List[Any] = "ade20k-id2label.json"
__UpperCAmelCase : str = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) )
    __UpperCAmelCase : str = {int(k ): v for k, v in idalabel.items()}
__UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()}
__UpperCAmelCase : int = ConvNextConfig(
depths=snake_case__, hidden_sizes=snake_case__, out_features=["stage1", "stage2", "stage3", "stage4"] )
__UpperCAmelCase : int = UperNetConfig(
backbone_config=snake_case__, auxiliary_in_channels=snake_case__, num_labels=snake_case__, idalabel=snake_case__, labelaid=snake_case__, )
return config
def _UpperCamelCase ( snake_case__ ) -> Tuple:
__UpperCAmelCase : Optional[int] = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Any:
__UpperCAmelCase : Union[str, Any] = dct.pop(snake_case__ )
__UpperCAmelCase : Optional[int] = val
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Union[str, Any]:
__UpperCAmelCase : Dict = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
__UpperCAmelCase : Union[str, Any] = model_name_to_url[model_name]
__UpperCAmelCase : str = torch.hub.load_state_dict_from_url(snake_case__, map_location="cpu" )["state_dict"]
__UpperCAmelCase : Dict = get_upernet_config(snake_case__ )
__UpperCAmelCase : str = UperNetForSemanticSegmentation(snake_case__ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
__UpperCAmelCase : str = state_dict.pop(snake_case__ )
if "bn" in key:
__UpperCAmelCase : int = key.replace("bn", "batch_norm" )
__UpperCAmelCase : Union[str, Any] = val
# rename keys
__UpperCAmelCase : Optional[Any] = create_rename_keys(snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__, snake_case__, snake_case__ )
model.load_state_dict(snake_case__ )
# verify on image
__UpperCAmelCase : int = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
__UpperCAmelCase : Optional[int] = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw ).convert("RGB" )
__UpperCAmelCase : str = SegformerImageProcessor()
__UpperCAmelCase : Any = processor(snake_case__, return_tensors="pt" ).pixel_values
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(snake_case__ )
if model_name == "upernet-convnext-tiny":
__UpperCAmelCase : Any = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
__UpperCAmelCase : Optional[Any] = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
__UpperCAmelCase : Dict = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
__UpperCAmelCase : Tuple = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
__UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print("Logits:", outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3], snake_case__, atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case__ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(snake_case__ )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[F'upernet-convnext-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_snake_case = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
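# --- Usage note (added): typical invocation of the conversion script above, e.g.
#   python convert_upernet_convnext_to_pytorch.py \
#       --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny \
#       --push_to_hub
# (the script file name is illustrative; the flags are exactly those defined above).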
| 342 | 1 |
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
def __init__( self: Any , __lowerCamelCase: Optional[int] , __lowerCamelCase: int=13 , __lowerCamelCase: Dict=[30, 30] , __lowerCamelCase: Dict=2 , __lowerCamelCase: List[Any]=3 , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: str=32 , __lowerCamelCase: int=5 , __lowerCamelCase: str=4 , __lowerCamelCase: Optional[int]=37 , __lowerCamelCase: Tuple="gelu" , __lowerCamelCase: Tuple=0.1 , __lowerCamelCase: Optional[int]=0.1 , __lowerCamelCase: Optional[int]=10 , __lowerCamelCase: List[str]=0.02 , __lowerCamelCase: int=3 , __lowerCamelCase: Dict=None , __lowerCamelCase: Optional[int]=8 , __lowerCamelCase: Any=10 , ) -> str:
__UpperCAmelCase : List[Any] = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : int = image_size
__UpperCAmelCase : Any = patch_size
__UpperCAmelCase : Tuple = num_channels
__UpperCAmelCase : Any = is_training
__UpperCAmelCase : Optional[int] = use_labels
__UpperCAmelCase : Optional[int] = hidden_size
__UpperCAmelCase : Union[str, Any] = num_hidden_layers
__UpperCAmelCase : str = num_attention_heads
__UpperCAmelCase : Union[str, Any] = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Dict = hidden_dropout_prob
__UpperCAmelCase : Tuple = attention_probs_dropout_prob
__UpperCAmelCase : Union[str, Any] = type_sequence_label_size
__UpperCAmelCase : Dict = initializer_range
__UpperCAmelCase : str = num_labels
__UpperCAmelCase : str = scope
__UpperCAmelCase : Tuple = n_targets
__UpperCAmelCase : Any = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
__UpperCAmelCase : Dict = (image_size[1] // patch_size) * (image_size[0] // patch_size)
__UpperCAmelCase : Optional[Any] = num_patches + 1 + self.num_detection_tokens
def _lowerCamelCase ( self: List[Any] ) -> int:
__UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
__UpperCAmelCase : List[str] = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
__UpperCAmelCase : int = []
for i in range(self.batch_size ):
__UpperCAmelCase : Optional[int] = {}
__UpperCAmelCase : Any = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=__lowerCamelCase )
__UpperCAmelCase : List[Any] = torch.rand(self.n_targets , 4 , device=__lowerCamelCase )
labels.append(__lowerCamelCase )
__UpperCAmelCase : int = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self: Union[str, Any] ) -> List[str]:
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def _lowerCamelCase ( self: Dict , __lowerCamelCase: Any , __lowerCamelCase: List[Any] , __lowerCamelCase: Optional[int] ) -> Any:
__UpperCAmelCase : Tuple = YolosModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : str = model(__lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: str , __lowerCamelCase: List[Any] , __lowerCamelCase: int ) -> List[str]:
__UpperCAmelCase : Optional[int] = YolosForObjectDetection(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Optional[int] = model(pixel_values=__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
__UpperCAmelCase : Optional[Any] = model(pixel_values=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def _lowerCamelCase ( self: Optional[int] ) -> Optional[Any]:
__UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = config_and_inputs
__UpperCAmelCase : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase__: Optional[int] = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCamelCase__: Optional[int] = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
lowerCamelCase__: Optional[Any] = False
lowerCamelCase__: Tuple = False
lowerCamelCase__: List[str] = False
lowerCamelCase__: int = False
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[Any]=False ) -> str:
__UpperCAmelCase : Tuple = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
__UpperCAmelCase : Any = []
for i in range(self.model_tester.batch_size ):
__UpperCAmelCase : Optional[int] = {}
__UpperCAmelCase : Dict = torch.ones(
size=(self.model_tester.n_targets,) , device=__lowerCamelCase , dtype=torch.long )
__UpperCAmelCase : List[str] = torch.ones(
self.model_tester.n_targets , 4 , device=__lowerCamelCase , dtype=torch.float )
labels.append(__lowerCamelCase )
__UpperCAmelCase : List[Any] = labels
return inputs_dict
def _lowerCamelCase ( self: Any ) -> Any:
__UpperCAmelCase : List[str] = YolosModelTester(self )
__UpperCAmelCase : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def _lowerCamelCase ( self: Union[str, Any] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self: int ) -> Optional[Any]:
# YOLOS does not use inputs_embeds
pass
def _lowerCamelCase ( self: Optional[int] ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[Any] = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def _lowerCamelCase ( self: List[str] ) -> str:
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Tuple = model_class(__lowerCamelCase )
__UpperCAmelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Dict = [*signature.parameters.keys()]
__UpperCAmelCase : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _lowerCamelCase ( self: Union[str, Any] ) -> List[Any]:
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowerCamelCase ( self: List[str] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : str = True
# in YOLOS, the seq_len is different
__UpperCAmelCase : Optional[Any] = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
__UpperCAmelCase : int = True
__UpperCAmelCase : Any = False
__UpperCAmelCase : Union[str, Any] = True
__UpperCAmelCase : Optional[Any] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
__UpperCAmelCase : Any = outputs.attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__UpperCAmelCase : Optional[Any] = True
__UpperCAmelCase : Optional[Any] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : List[str] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
__UpperCAmelCase : str = outputs.attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__UpperCAmelCase : List[Any] = len(__lowerCamelCase )
# Check attention is always last and order is fine
__UpperCAmelCase : int = True
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : List[str] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : List[str] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
__UpperCAmelCase : Optional[int] = 1
self.assertEqual(out_len + added_hidden_states , len(__lowerCamelCase ) )
__UpperCAmelCase : Any = outputs.attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _lowerCamelCase ( self: int ) -> Tuple:
def check_hidden_states_output(__lowerCamelCase: List[str] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[int] ):
__UpperCAmelCase : Optional[int] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : str = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
__UpperCAmelCase : Any = outputs.hidden_states
__UpperCAmelCase : List[str] = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
# YOLOS has a different seq_length
__UpperCAmelCase : Optional[int] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Any = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Optional[Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*__lowerCamelCase )
@slow
def _lowerCamelCase ( self: Dict ) -> Optional[int]:
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Any = YolosModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def _UpperCamelCase ( ) -> List[str]:
__UpperCAmelCase : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self: str ) -> Optional[int]:
return AutoImageProcessor.from_pretrained("hustvl/yolos-small" ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self: int ) -> Any:
__UpperCAmelCase : List[Any] = YolosForObjectDetection.from_pretrained("hustvl/yolos-small" ).to(__lowerCamelCase )
__UpperCAmelCase : Dict = self.default_image_processor
__UpperCAmelCase : Dict = prepare_img()
__UpperCAmelCase : Any = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
__UpperCAmelCase : Any = model(inputs.pixel_values )
# verify outputs
__UpperCAmelCase : Optional[Any] = torch.Size((1, 1_00, 92) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
__UpperCAmelCase : str = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] , device=__lowerCamelCase , )
__UpperCAmelCase : Tuple = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] , device=__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __lowerCamelCase , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify postprocessing
__UpperCAmelCase : List[str] = image_processor.post_process_object_detection(
__lowerCamelCase , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
__UpperCAmelCase : str = torch.tensor([0.99_94, 0.97_90, 0.99_64, 0.99_72, 0.98_61] ).to(__lowerCamelCase )
__UpperCAmelCase : Tuple = [75, 75, 17, 63, 17]
__UpperCAmelCase : Tuple = torch.tensor([3_35.06_09, 79.38_48, 3_75.42_16, 1_87.24_95] ).to(__lowerCamelCase )
self.assertEqual(len(results["scores"] ) , 5 )
self.assertTrue(torch.allclose(results["scores"] , __lowerCamelCase , atol=1e-4 ) )
self.assertSequenceEqual(results["labels"].tolist() , __lowerCamelCase )
self.assertTrue(torch.allclose(results["boxes"][0, :] , __lowerCamelCase ) )
| 342 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class _snake_case ( _lowercase ):
lowerCamelCase__: Dict = "roc_bert"
def __init__( self: int , __lowerCamelCase: Union[str, Any]=3_05_22 , __lowerCamelCase: int=7_68 , __lowerCamelCase: Any=12 , __lowerCamelCase: int=12 , __lowerCamelCase: Union[str, Any]=30_72 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: Optional[int]=0.1 , __lowerCamelCase: str=0.1 , __lowerCamelCase: Any=5_12 , __lowerCamelCase: Union[str, Any]=2 , __lowerCamelCase: str=0.02 , __lowerCamelCase: int=1e-12 , __lowerCamelCase: str=True , __lowerCamelCase: int=0 , __lowerCamelCase: List[str]="absolute" , __lowerCamelCase: List[Any]=None , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: Dict=7_68 , __lowerCamelCase: Optional[int]=9_10 , __lowerCamelCase: Union[str, Any]=5_12 , __lowerCamelCase: int=2_48_58 , __lowerCamelCase: Optional[int]=True , **__lowerCamelCase: Any , ) -> List[Any]:
__UpperCAmelCase : str = vocab_size
__UpperCAmelCase : Dict = max_position_embeddings
__UpperCAmelCase : Optional[Any] = hidden_size
__UpperCAmelCase : Optional[int] = num_hidden_layers
__UpperCAmelCase : Union[str, Any] = num_attention_heads
__UpperCAmelCase : List[str] = intermediate_size
__UpperCAmelCase : List[Any] = hidden_act
__UpperCAmelCase : List[str] = hidden_dropout_prob
__UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : Optional[Any] = type_vocab_size
__UpperCAmelCase : List[Any] = layer_norm_eps
__UpperCAmelCase : Optional[int] = use_cache
__UpperCAmelCase : Optional[Any] = enable_pronunciation
__UpperCAmelCase : Any = enable_shape
__UpperCAmelCase : Union[str, Any] = pronunciation_embed_dim
__UpperCAmelCase : Optional[Any] = pronunciation_vocab_size
__UpperCAmelCase : Optional[Any] = shape_embed_dim
__UpperCAmelCase : List[Any] = shape_vocab_size
__UpperCAmelCase : int = concat_input
__UpperCAmelCase : int = position_embedding_type
__UpperCAmelCase : Optional[int] = classifier_dropout
super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase )
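# --- illustrative add-on (a sketch, not part of the config file above) ---
# Loading the published checkpoint's configuration through the generic AutoConfig entry
# point; assumes network access to the Hub.
if __name__ == "__main__":
    from transformers import AutoConfig
    cfg = AutoConfig.from_pretrained("weiweishi/roc-bert-base-zh")
    print(cfg.model_type)  # expected: "roc_bert"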
| 342 | 1 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_snake_case = ['''text''', '''image''', '''audio''']
def create_inputs(input_types: List[str]):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512)))
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f'''Invalid type requested: {input_type}''')
    return inputs
def output_types(outputs: List):
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f'''Invalid output: {output}''')
    return output_types
@is_tool_test
class _snake_case :
def _lowerCamelCase ( self: Union[str, Any] ) -> str:
self.assertTrue(hasattr(self.tool , "inputs" ) )
self.assertTrue(hasattr(self.tool , "outputs" ) )
__UpperCAmelCase : Optional[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input , __lowerCamelCase ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
__UpperCAmelCase : List[str] = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def _lowerCamelCase ( self: Any ) -> int:
__UpperCAmelCase : Union[str, Any] = create_inputs(self.tool.inputs )
__UpperCAmelCase : Optional[Any] = self.tool(*__lowerCamelCase )
# There is a single output
if len(self.tool.outputs ) == 1:
__UpperCAmelCase : List[str] = [outputs]
self.assertListEqual(output_types(__lowerCamelCase ) , self.tool.outputs )
def _lowerCamelCase ( self: int ) -> List[str]:
self.assertTrue(hasattr(self.tool , "description" ) )
self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def _lowerCamelCase ( self: Tuple ) -> Dict:
__UpperCAmelCase : Dict = create_inputs(self.tool.inputs )
__UpperCAmelCase : Optional[int] = self.tool(*__lowerCamelCase )
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Union[str, Any] = [outputs]
self.assertEqual(len(__lowerCamelCase ) , len(self.tool.outputs ) )
for output, output_type in zip(__lowerCamelCase , self.tool.outputs ):
__UpperCAmelCase : Optional[Any] = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(__lowerCamelCase , __lowerCamelCase ) )
def _lowerCamelCase ( self: int ) -> Optional[int]:
__UpperCAmelCase : Optional[Any] = create_inputs(self.tool.inputs )
__UpperCAmelCase : int = []
for _input, input_type in zip(__lowerCamelCase , self.tool.inputs ):
if isinstance(__lowerCamelCase , __lowerCamelCase ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
__UpperCAmelCase : str = self.tool(*__lowerCamelCase )
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : List[str] = [outputs]
self.assertEqual(len(__lowerCamelCase ) , len(self.tool.outputs ) )
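# --- illustrative add-on (a note, not part of the test mixin above) ---
# create_inputs and output_types are inverses over the supported modalities, e.g.
# output_types(create_inputs(["text", "audio"])) == ["text", "audio"], which is what
# lets the mixin compare a tool's declared outputs against what it actually returns.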
| 342 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def _UpperCamelCase ( snake_case__ ) -> int:
__UpperCAmelCase : int = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
__UpperCAmelCase : int = [144, 192, 240]
__UpperCAmelCase : Optional[Any] = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
__UpperCAmelCase : Optional[Any] = [96, 120, 144]
__UpperCAmelCase : Tuple = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
__UpperCAmelCase : str = [64, 80, 96]
__UpperCAmelCase : Optional[Any] = [16, 16, 24, 48, 64, 80, 320]
__UpperCAmelCase : Tuple = 0.05
__UpperCAmelCase : Dict = 2.0
if mobilevit_name.startswith("deeplabv3_" ):
__UpperCAmelCase : str = 512
__UpperCAmelCase : Any = 16
__UpperCAmelCase : str = 21
__UpperCAmelCase : Union[str, Any] = "pascal-voc-id2label.json"
else:
__UpperCAmelCase : Optional[Any] = 1000
__UpperCAmelCase : int = "imagenet-1k-id2label.json"
__UpperCAmelCase : Dict = "huggingface/label-files"
__UpperCAmelCase : int = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) )
__UpperCAmelCase : Any = {int(snake_case__ ): v for k, v in idalabel.items()}
__UpperCAmelCase : int = idalabel
__UpperCAmelCase : List[str] = {v: k for k, v in idalabel.items()}
return config
def _UpperCamelCase ( snake_case__, snake_case__=False ) -> Tuple:
for i in range(1, 6 ):
if f'''layer_{i}.''' in name:
__UpperCAmelCase : Tuple = name.replace(f'''layer_{i}.''', f'''encoder.layer.{i - 1}.''' )
if "conv_1." in name:
__UpperCAmelCase : Dict = name.replace("conv_1.", "conv_stem." )
if ".block." in name:
__UpperCAmelCase : Optional[int] = name.replace(".block.", "." )
if "exp_1x1" in name:
__UpperCAmelCase : Tuple = name.replace("exp_1x1", "expand_1x1" )
if "red_1x1" in name:
__UpperCAmelCase : Optional[Any] = name.replace("red_1x1", "reduce_1x1" )
if ".local_rep.conv_3x3." in name:
__UpperCAmelCase : Optional[int] = name.replace(".local_rep.conv_3x3.", ".conv_kxk." )
if ".local_rep.conv_1x1." in name:
__UpperCAmelCase : Any = name.replace(".local_rep.conv_1x1.", ".conv_1x1." )
if ".norm." in name:
__UpperCAmelCase : Dict = name.replace(".norm.", ".normalization." )
if ".conv." in name:
__UpperCAmelCase : List[Any] = name.replace(".conv.", ".convolution." )
if ".conv_proj." in name:
__UpperCAmelCase : List[str] = name.replace(".conv_proj.", ".conv_projection." )
for i in range(0, 2 ):
for j in range(0, 4 ):
if f'''.{i}.{j}.''' in name:
__UpperCAmelCase : List[Any] = name.replace(f'''.{i}.{j}.''', f'''.{i}.layer.{j}.''' )
for i in range(2, 6 ):
for j in range(0, 4 ):
if f'''.{i}.{j}.''' in name:
__UpperCAmelCase : Any = name.replace(f'''.{i}.{j}.''', f'''.{i}.''' )
if "expand_1x1" in name:
__UpperCAmelCase : Optional[int] = name.replace("expand_1x1", "downsampling_layer.expand_1x1" )
if "conv_3x3" in name:
__UpperCAmelCase : List[Any] = name.replace("conv_3x3", "downsampling_layer.conv_3x3" )
if "reduce_1x1" in name:
__UpperCAmelCase : Dict = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1" )
for i in range(2, 5 ):
if f'''.global_rep.{i}.weight''' in name:
__UpperCAmelCase : Any = name.replace(f'''.global_rep.{i}.weight''', ".layernorm.weight" )
if f'''.global_rep.{i}.bias''' in name:
__UpperCAmelCase : Optional[Any] = name.replace(f'''.global_rep.{i}.bias''', ".layernorm.bias" )
if ".global_rep." in name:
__UpperCAmelCase : Tuple = name.replace(".global_rep.", ".transformer." )
if ".pre_norm_mha.0." in name:
__UpperCAmelCase : Optional[Any] = name.replace(".pre_norm_mha.0.", ".layernorm_before." )
if ".pre_norm_mha.1.out_proj." in name:
__UpperCAmelCase : Tuple = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense." )
if ".pre_norm_ffn.0." in name:
__UpperCAmelCase : Optional[Any] = name.replace(".pre_norm_ffn.0.", ".layernorm_after." )
if ".pre_norm_ffn.1." in name:
__UpperCAmelCase : Dict = name.replace(".pre_norm_ffn.1.", ".intermediate.dense." )
if ".pre_norm_ffn.4." in name:
__UpperCAmelCase : int = name.replace(".pre_norm_ffn.4.", ".output.dense." )
if ".transformer." in name:
__UpperCAmelCase : Tuple = name.replace(".transformer.", ".transformer.layer." )
if ".aspp_layer." in name:
__UpperCAmelCase : Any = name.replace(".aspp_layer.", "." )
if ".aspp_pool." in name:
__UpperCAmelCase : Optional[Any] = name.replace(".aspp_pool.", "." )
if "seg_head." in name:
__UpperCAmelCase : Optional[int] = name.replace("seg_head.", "segmentation_head." )
if "segmentation_head.classifier.classifier." in name:
__UpperCAmelCase : str = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier." )
if "classifier.fc." in name:
__UpperCAmelCase : Optional[Any] = name.replace("classifier.fc.", "classifier." )
elif (not base_model) and ("segmentation_head." not in name):
__UpperCAmelCase : List[str] = "mobilevit." + name
return name
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=False ) -> Union[str, Any]:
if base_model:
__UpperCAmelCase : Optional[int] = ""
else:
__UpperCAmelCase : Tuple = "mobilevit."
for key in orig_state_dict.copy().keys():
__UpperCAmelCase : Optional[int] = orig_state_dict.pop(snake_case__ )
if key[:8] == "encoder.":
__UpperCAmelCase : str = key[8:]
if "qkv" in key:
__UpperCAmelCase : Tuple = key.split("." )
__UpperCAmelCase : List[Any] = int(key_split[0][6:] ) - 1
__UpperCAmelCase : Optional[Any] = int(key_split[3] )
__UpperCAmelCase : Tuple = model.get_submodule(f'''{model_prefix}encoder.layer.{layer_num}''' )
__UpperCAmelCase : List[str] = layer.transformer.layer[transformer_num].attention.attention.all_head_size
__UpperCAmelCase : Optional[Any] = (
f'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'''
)
if "weight" in key:
__UpperCAmelCase : Any = val[:dim, :]
__UpperCAmelCase : Any = val[dim : dim * 2, :]
__UpperCAmelCase : List[Any] = val[-dim:, :]
else:
__UpperCAmelCase : List[str] = val[:dim]
__UpperCAmelCase : Optional[Any] = val[dim : dim * 2]
__UpperCAmelCase : List[Any] = val[-dim:]
else:
__UpperCAmelCase : str = val
return orig_state_dict
def _UpperCamelCase ( ) -> Any:
__UpperCAmelCase : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
__UpperCAmelCase : List[str] = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw )
return im
@torch.no_grad()
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__=False ) -> Optional[Any]:
__UpperCAmelCase : Tuple = get_mobilevit_config(snake_case__ )
# load original state_dict
__UpperCAmelCase : str = torch.load(snake_case__, map_location="cpu" )
# load 🤗 model
if mobilevit_name.startswith("deeplabv3_" ):
__UpperCAmelCase : Optional[int] = MobileViTForSemanticSegmentation(snake_case__ ).eval()
else:
__UpperCAmelCase : List[Any] = MobileViTForImageClassification(snake_case__ ).eval()
__UpperCAmelCase : Dict = convert_state_dict(snake_case__, snake_case__ )
model.load_state_dict(snake_case__ )
# Check outputs on an image, prepared by MobileViTImageProcessor
__UpperCAmelCase : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32 )
__UpperCAmelCase : Any = image_processor(images=prepare_img(), return_tensors="pt" )
__UpperCAmelCase : Dict = model(**snake_case__ )
__UpperCAmelCase : Tuple = outputs.logits
if mobilevit_name.startswith("deeplabv3_" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
__UpperCAmelCase : int = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
__UpperCAmelCase : Tuple = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
__UpperCAmelCase : Any = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
] )
else:
raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3, :3, :3], snake_case__, atol=1e-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
__UpperCAmelCase : str = torch.tensor([-0.9866, 0.2392, -1.1241] )
elif mobilevit_name == "mobilevit_xs":
__UpperCAmelCase : Tuple = torch.tensor([-2.4761, -0.9399, -1.9587] )
elif mobilevit_name == "mobilevit_xxs":
__UpperCAmelCase : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653] )
else:
raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3], snake_case__, atol=1e-4 )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(f'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case__ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(snake_case__ )
if push_to_hub:
__UpperCAmelCase : List[str] = {
"mobilevit_s": "mobilevit-small",
"mobilevit_xs": "mobilevit-x-small",
"mobilevit_xxs": "mobilevit-xx-small",
"deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
"deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
"deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
}
print("Pushing to the hub..." )
__UpperCAmelCase : int = model_mapping[mobilevit_name]
image_processor.push_to_hub(snake_case__, organization="apple" )
model.push_to_hub(snake_case__, organization="apple" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_snake_case = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
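# --- illustrative add-on (a sketch; the flag names come from the parser above, while the
# script file name is an assumption) ---
# Example invocation with a local checkpoint from Apple's ml-cvnets release:
# python convert_mobilevit.py --mobilevit_name mobilevit_s \
#     --checkpoint_path ./mobilevit_s.pt --pytorch_dump_folder_path ./mobilevit-small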
| 342 | 1 |
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for _ in range(max_length)]
        # pad the shorter signal with zeros so both signals have the same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # round off to two decimal places
        return [round(value, 2) for value in final_signal]
if __name__ == "__main__":
doctest.testmod()
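# --- illustrative add-on (a sketch, not part of the original file) ---
# Circular convolution can be cross-checked with the FFT: for same-length signals,
# ifft(fft(x) * fft(y)) computes it in O(n log n). The helper name below is mine.
import numpy as np

def circular_convolution_fft(x, y):
    n = max(len(x), len(y))
    x = np.pad(x, (0, n - len(x)))
    y = np.pad(y, (0, n - len(y)))
    return [round(float(v), 2) for v in np.real(np.fft.ifft(np.fft.fft(x) * np.fft.fft(y)))]

# Matches the matrix-based method above on its default signals:
assert circular_convolution_fft([2, 1, 2, -1], [1, 2, 3, 4]) == [10.0, 10.0, 6.0, 14.0]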
| 342 | import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution(num_picked: int = 20) -> str:
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
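# --- illustrative add-on (a sketch, not part of the original solution) ---
# Monte Carlo cross-check: draw 20 of the 70 balls without replacement and count the
# distinct colours; the empirical mean approaches the analytic value printed above.
import random

def monte_carlo_estimate(trials: int = 100_000, colours: int = 7, per_colour: int = 10, picked: int = 20) -> float:
    total_balls = colours * per_colour
    seen = 0
    for _ in range(trials):
        draw = random.sample(range(total_balls), picked)
        seen += len({ball // per_colour for ball in draw})
    return seen / trials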
| 342 | 1 |
from scipy.stats import spearmanr
import datasets
_snake_case = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_snake_case = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_snake_case = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def _lowerCamelCase ( self: Any ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Tuple , __lowerCamelCase: List[Any] , __lowerCamelCase: Any=False ) -> Tuple:
__UpperCAmelCase : int = spearmanr(__lowerCamelCase , __lowerCamelCase )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 342 | def longest_distance(graph):
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)
    print(max(long_dist))
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
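# --- illustrative add-on (a sketch; the names below are mine, not from the file above) ---
# The same longest-path-in-a-DAG computation, returning the value instead of printing it
# and using collections.deque for O(1) queue pops instead of list.pop(0).
from collections import deque

def longest_distance_value(dag: dict) -> int:
    indegree = {u: 0 for u in dag}
    for targets in dag.values():
        for v in targets:
            indegree[v] += 1
    order = deque(u for u, d in indegree.items() if d == 0)
    dist = {u: 1 for u in dag}
    while order:
        u = order.popleft()
        for v in dag[u]:
            dist[v] = max(dist[v], dist[u] + 1)
            indegree[v] -= 1
            if indegree[v] == 0:
                order.append(v)
    return max(dist.values())

# Five vertices lie on the longest path of the sample graph, e.g. 0 -> 3 -> 5 -> 6 -> 7:
assert longest_distance_value({0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}) == 5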
| 342 | 1 |
import argparse
import struct
import unittest
class SHAaaa:
    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6_A_0_9_E_6_6_7,
0xB_B_6_7_A_E_8_5,
0x3_C_6_E_F_3_7_2,
0xA_5_4_F_F_5_3_A,
0x5_1_0_E_5_2_7_F,
0x9_B_0_5_6_8_8_C,
0x1_F_8_3_D_9_A_B,
0x5_B_E_0_C_D_1_9,
]
# Initialize round constants
        self.round_constants = [
0x4_2_8_A_2_F_9_8,
0x7_1_3_7_4_4_9_1,
0xB_5_C_0_F_B_C_F,
0xE_9_B_5_D_B_A_5,
0x3_9_5_6_C_2_5_B,
0x5_9_F_1_1_1_F_1,
0x9_2_3_F_8_2_A_4,
0xA_B_1_C_5_E_D_5,
0xD_8_0_7_A_A_9_8,
0x1_2_8_3_5_B_0_1,
0x2_4_3_1_8_5_B_E,
0x5_5_0_C_7_D_C_3,
0x7_2_B_E_5_D_7_4,
0x8_0_D_E_B_1_F_E,
0x9_B_D_C_0_6_A_7,
0xC_1_9_B_F_1_7_4,
0xE_4_9_B_6_9_C_1,
0xE_F_B_E_4_7_8_6,
0x0_F_C_1_9_D_C_6,
0x2_4_0_C_A_1_C_C,
0x2_D_E_9_2_C_6_F,
0x4_A_7_4_8_4_A_A,
0x5_C_B_0_A_9_D_C,
0x7_6_F_9_8_8_D_A,
0x9_8_3_E_5_1_5_2,
0xA_8_3_1_C_6_6_D,
0xB_0_0_3_2_7_C_8,
0xB_F_5_9_7_F_C_7,
0xC_6_E_0_0_B_F_3,
0xD_5_A_7_9_1_4_7,
0x0_6_C_A_6_3_5_1,
0x1_4_2_9_2_9_6_7,
0x2_7_B_7_0_A_8_5,
0x2_E_1_B_2_1_3_8,
0x4_D_2_C_6_D_F_C,
0x5_3_3_8_0_D_1_3,
0x6_5_0_A_7_3_5_4,
0x7_6_6_A_0_A_B_B,
0x8_1_C_2_C_9_2_E,
0x9_2_7_2_2_C_8_5,
0xA_2_B_F_E_8_A_1,
0xA_8_1_A_6_6_4_B,
0xC_2_4_B_8_B_7_0,
0xC_7_6_C_5_1_A_3,
0xD_1_9_2_E_8_1_9,
0xD_6_9_9_0_6_2_4,
0xF_4_0_E_3_5_8_5,
0x1_0_6_A_A_0_7_0,
0x1_9_A_4_C_1_1_6,
0x1_E_3_7_6_C_0_8,
0x2_7_4_8_7_7_4_C,
0x3_4_B_0_B_C_B_5,
0x3_9_1_C_0_C_B_3,
0x4_E_D_8_A_A_4_A,
0x5_B_9_C_C_A_4_F,
0x6_8_2_E_6_F_F_3,
0x7_4_8_F_8_2_E_E,
0x7_8_A_5_6_3_6_F,
0x8_4_C_8_7_8_1_4,
0x8_C_C_7_0_2_0_8,
0x9_0_B_E_F_F_F_A,
0xA_4_5_0_6_C_E_B,
0xB_E_F_9_A_3_F_7,
0xC_6_7_1_7_8_F_2,
]
        self.preprocessed_data = self.preprocessing(self.data)
self.final_hash()
@staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_0_0_0_0_0_0_0_0
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xF_F_F_F_F_F_F_F) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_0_0_0_0_0_0_0_0
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_0_0_0_0_0_0_0_0
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_0_0_0_0_0_0_0_0),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_0_0_0_0_0_0_0_0),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_0_0_0_0_0_0_0_0)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value: int, rotations: int) -> int:
        return 0xF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations)
class SHAaaaHashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib
        data = bytes("Test String", "utf-8")
        self.assertEqual(SHAaaa(data).hash, hashlib.sha256(data).hexdigest())
def main() -> None:
    import doctest
    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file" )
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHAaaa(hash_input).hash)
if __name__ == "__main__":
main()
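# --- illustrative add-on (a sketch, not part of the original file) ---
# The rotate-right primitive at the heart of the compression loop, isolated and checked
# on a 32-bit value: the low bit wraps around to become the top bit.
def _ror32(value: int, rotations: int) -> int:
    return 0xFFFFFFFF & ((value << (32 - rotations)) | (value >> rotations))

assert _ror32(0x00000001, 1) == 0x80000000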
| 342 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
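# --- illustrative add-on (a note, not part of the original __init__) ---
# The _LazyModule indirection keeps importing the package cheap: the torch, TF and Flax
# submodules listed above are only imported on first attribute access, e.g.
# from transformers import WhisperForConditionalGeneration  # triggers the torch branch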
| 342 | 1 |
def calc_profit(profit: list, weight: list, max_weight: int) -> int:
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    # Profit gained per 1 kg of each item: calculate profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)
    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0
    # loop until the total weight reaches the max limit (e.g. 15 kg) or i == length
    while limit <= max_weight and i < length:
        # greatest remaining profit/weight ratio in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        # mark the ratio as used (-1 is safe since profit and weight are non-negative)
        profit_by_weight[index] = -1
        # check if the whole item still fits within the remaining capacity
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # the whole item is taken, so add its full profit
            gain += 1 * profit[index]
        else:
            # The item does not fit whole: take the remaining kgs and add the
            # proportional profit, i.e. (weight remaining / weight[index]) * profit.
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
    profit = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
    weight = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
    max_weight = int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight)
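# --- illustrative add-on (a sketch, not part of the original file) ---
# Non-interactive sanity check with the classic fractional-knapsack example: the
# profit/weight ratios are 6, 5 and 4, the first two items fit whole, and 20 of the last
# item's 30 kg contribute 120 * 20 / 30 = 80, for a total of 60 + 100 + 80 = 240.
assert calc_profit([60, 100, 120], [10, 20, 30], 50) == 240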
| 342 | from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
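# --- illustrative add-on (a sketch, not part of the original file) ---
# Zeroing exactly one argument asks ind_reactance to solve X_L = 2 * pi * f * L for it,
# e.g. a 35 mH inductor at 1 kHz has X_L = 2 * pi * 1000 * 0.035 ≈ 219.91 ohm.
assert abs(ind_reactance(35e-3, 1e3, 0)["reactance"] - 219.9114857513) < 1e-6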
| 342 | 1 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class _snake_case ( _lowercase ):
@slow
@require_torch
def _lowerCamelCase ( self: Any ) -> Optional[Any]:
__UpperCAmelCase : int = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
__UpperCAmelCase : Optional[Any] = BertTokenizer.from_pretrained("bert-base-uncased" )
__UpperCAmelCase : int = bertabert.config.encoder.vocab_size
__UpperCAmelCase : List[str] = tokenizer.sep_token_id
__UpperCAmelCase : List[str] = tokenizer.cls_token_id
__UpperCAmelCase : Dict = 1_28
__UpperCAmelCase : Optional[int] = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
__UpperCAmelCase : List[Any] = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
__UpperCAmelCase : List[str] = train_dataset.select(range(32 ) )
__UpperCAmelCase : int = val_dataset.select(range(16 ) )
__UpperCAmelCase : str = 4
def _map_to_encoder_decoder_inputs(__lowerCamelCase: Optional[int] ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__UpperCAmelCase : Union[str, Any] = tokenizer(batch["article"] , padding="max_length" , truncation=__lowerCamelCase , max_length=5_12 )
__UpperCAmelCase : List[str] = tokenizer(batch["highlights"] , padding="max_length" , truncation=__lowerCamelCase , max_length=1_28 )
__UpperCAmelCase : Any = inputs.input_ids
__UpperCAmelCase : int = inputs.attention_mask
__UpperCAmelCase : Optional[int] = outputs.input_ids
__UpperCAmelCase : Tuple = outputs.input_ids.copy()
__UpperCAmelCase : int = [
[-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
]
__UpperCAmelCase : List[str] = outputs.attention_mask
assert all(len(__lowerCamelCase ) == 5_12 for x in inputs.input_ids )
assert all(len(__lowerCamelCase ) == 1_28 for x in outputs.input_ids )
return batch
def _compute_metrics(__lowerCamelCase: Optional[Any] ):
__UpperCAmelCase : Dict = pred.label_ids
__UpperCAmelCase : str = pred.predictions
# all unnecessary tokens are removed
__UpperCAmelCase : int = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
__UpperCAmelCase : Any = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__lowerCamelCase ) )] ) / len(__lowerCamelCase )
return {"accuracy": accuracy}
# map train dataset
__UpperCAmelCase : str = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=__lowerCamelCase , batch_size=__lowerCamelCase , remove_columns=["article", "highlights"] , )
train_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
# same for validation dataset
__UpperCAmelCase : Dict = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=__lowerCamelCase , batch_size=__lowerCamelCase , remove_columns=["article", "highlights"] , )
val_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
__UpperCAmelCase : Dict = self.get_auto_remove_tmp_dir()
__UpperCAmelCase : str = SeqaSeqTrainingArguments(
output_dir=__lowerCamelCase , per_device_train_batch_size=__lowerCamelCase , per_device_eval_batch_size=__lowerCamelCase , predict_with_generate=__lowerCamelCase , evaluation_strategy="steps" , do_train=__lowerCamelCase , do_eval=__lowerCamelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
__UpperCAmelCase : Tuple = SeqaSeqTrainer(
model=__lowerCamelCase , args=__lowerCamelCase , compute_metrics=_compute_metrics , train_dataset=__lowerCamelCase , eval_dataset=__lowerCamelCase , tokenizer=__lowerCamelCase , )
# start training
trainer.train()
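# --- illustrative add-on (a sketch, not part of the test above) ---
# Why padded label positions become -100: torch's cross entropy ignores that index by
# default, so padding contributes nothing to the loss.
import torch

logits = torch.randn(4, 10)
labels = torch.tensor([1, 2, -100, -100])
loss = torch.nn.functional.cross_entropy(logits, labels)  # averaged over the 2 real tokens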
| 342 | import flax.linen as nn
import jax
import jax.numpy as jnp
class _snake_case ( nn.Module ):
lowerCamelCase__: int
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: Tuple ) -> Union[str, Any]:
__UpperCAmelCase : List[str] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Optional[Any] , __lowerCamelCase: Optional[int] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = hidden_states.shape
__UpperCAmelCase : Dict = jax.image.resize(
__lowerCamelCase , shape=(batch, height * 2, width * 2, channels) , method="nearest" , )
__UpperCAmelCase : Dict = self.conv(__lowerCamelCase )
return hidden_states
class _snake_case ( nn.Module ):
lowerCamelCase__: int
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: str ) -> Any:
__UpperCAmelCase : Optional[int] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Dict , __lowerCamelCase: str ) -> List[Any]:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
__UpperCAmelCase : Any = self.conv(__lowerCamelCase )
return hidden_states
class _snake_case ( nn.Module ):
lowerCamelCase__: int
lowerCamelCase__: int = None
lowerCamelCase__: float = 0.0
lowerCamelCase__: bool = None
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: str ) -> List[str]:
__UpperCAmelCase : str = self.in_channels if self.out_channels is None else self.out_channels
__UpperCAmelCase : Dict = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
__UpperCAmelCase : List[str] = nn.Conv(
__lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__UpperCAmelCase : Optional[Any] = nn.Dense(__lowerCamelCase , dtype=self.dtype )
__UpperCAmelCase : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
__UpperCAmelCase : Optional[Any] = nn.Dropout(self.dropout_prob )
__UpperCAmelCase : Tuple = nn.Conv(
__lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__UpperCAmelCase : Optional[int] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
__UpperCAmelCase : List[Any] = None
if use_nin_shortcut:
__UpperCAmelCase : Dict = nn.Conv(
__lowerCamelCase , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , )
def __call__( self: Tuple , __lowerCamelCase: Tuple , __lowerCamelCase: str , __lowerCamelCase: Union[str, Any]=True ) -> List[Any]:
__UpperCAmelCase : Dict = hidden_states
__UpperCAmelCase : int = self.norma(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = nn.swish(__lowerCamelCase )
__UpperCAmelCase : Tuple = self.conva(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.time_emb_proj(nn.swish(__lowerCamelCase ) )
__UpperCAmelCase : List[str] = jnp.expand_dims(jnp.expand_dims(__lowerCamelCase , 1 ) , 1 )
__UpperCAmelCase : List[str] = hidden_states + temb
__UpperCAmelCase : Union[str, Any] = self.norma(__lowerCamelCase )
__UpperCAmelCase : Tuple = nn.swish(__lowerCamelCase )
__UpperCAmelCase : str = self.dropout(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : List[str] = self.conva(__lowerCamelCase )
if self.conv_shortcut is not None:
__UpperCAmelCase : Optional[int] = self.conv_shortcut(__lowerCamelCase )
return hidden_states + residual
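# --- illustrative add-on (a sketch, not part of the original module) ---
# The 2x nearest-neighbour upsampling used by the first block above, in isolation and in
# the same NHWC layout that Flax convolutions expect.
import jax
import jax.numpy as jnp

x = jnp.arange(16.0).reshape(1, 2, 2, 4)  # (batch, height, width, channels)
up = jax.image.resize(x, shape=(1, 4, 4, 4), method="nearest")
assert up.shape == (1, 4, 4, 4)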
| 342 | 1 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
_snake_case = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
_snake_case = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
_snake_case = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
_snake_case = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
_snake_case = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Any:
for tf_name, hf_name in patterns:
__UpperCAmelCase : Optional[int] = k.replace(snake_case__, snake_case__ )
return k
def _UpperCamelCase ( snake_case__, snake_case__ ) -> BigBirdPegasusForConditionalGeneration:
__UpperCAmelCase : Dict = BigBirdPegasusConfig(**snake_case__ )
__UpperCAmelCase : Dict = BigBirdPegasusForConditionalGeneration(snake_case__ )
__UpperCAmelCase : Optional[Any] = torch_model.state_dict()
__UpperCAmelCase : Optional[int] = {}
# separating decoder weights
__UpperCAmelCase : List[Any] = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder" )}
__UpperCAmelCase : str = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder" )}
for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion" ):
__UpperCAmelCase : Optional[int] = [k.endswith(snake_case__ ) for ending in KEYS_TO_IGNORE]
if any(snake_case__ ):
continue
__UpperCAmelCase : List[str] = DECODER_PATTERNS
__UpperCAmelCase : str = rename_state_dict_key(snake_case__, snake_case__ )
if new_k not in state_dict:
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
if any(True if i in k else False for i in ["dense", "query", "key", "value"] ):
__UpperCAmelCase : Optional[int] = v.T
__UpperCAmelCase : str = torch.from_numpy(snake_case__ )
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion" ):
__UpperCAmelCase : int = [k.endswith(snake_case__ ) for ending in KEYS_TO_IGNORE]
if any(snake_case__ ):
continue
__UpperCAmelCase : Optional[Any] = REMAINING_PATTERNS
__UpperCAmelCase : Optional[int] = rename_state_dict_key(snake_case__, snake_case__ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
if any(True if i in k else False for i in ["dense", "query", "key", "value"] ):
__UpperCAmelCase : List[Any] = v.T
__UpperCAmelCase : List[str] = torch.from_numpy(snake_case__ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
__UpperCAmelCase : List[Any] = mapping["model.embed_positions.weight"]
__UpperCAmelCase : Optional[Any] = mapping.pop("model.embed_positions.weight" )
__UpperCAmelCase , __UpperCAmelCase : Any = torch_model.load_state_dict(snake_case__, strict=snake_case__ )
__UpperCAmelCase : str = [
k
for k in missing
if k
not in [
"final_logits_bias",
"model.encoder.embed_tokens.weight",
"model.decoder.embed_tokens.weight",
"lm_head.weight",
]
]
assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], f'''no matches found for the following tf keys {extra}'''
return torch_model
def _UpperCamelCase ( snake_case__ ) -> Dict:
__UpperCAmelCase : Tuple = tf.train.list_variables(snake_case__ )
__UpperCAmelCase : List[str] = {}
__UpperCAmelCase : str = ["global_step"]
for name, shape in tqdm(snake_case__, desc="converting tf checkpoint to dict" ):
__UpperCAmelCase : Tuple = any(pat in name for pat in ignore_name )
if skip_key:
continue
__UpperCAmelCase : Optional[Any] = tf.train.load_variable(snake_case__, snake_case__ )
__UpperCAmelCase : Tuple = array
return tf_weights
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Dict:
__UpperCAmelCase : str = get_tf_weights_as_numpy(snake_case__ )
__UpperCAmelCase : List[Any] = convert_bigbird_pegasus(snake_case__, snake_case__ )
torch_model.save_pretrained(snake_case__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
_snake_case = parser.parse_args()
_snake_case = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
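# --- illustrative add-on (a note, not part of the conversion script) ---
# TensorFlow stores dense kernels as (in_features, out_features) while torch.nn.Linear
# stores (out_features, in_features); that is why dense/query/key/value tensors are
# transposed with `v.T` above before being copied into the state dict.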
| 342 | import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_snake_case = pytest.mark.integration
@require_faiss
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: Union[str, Any] ) -> str:
        __UpperCAmelCase : Optional[int] = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
__UpperCAmelCase : int = dset.map(
            lambda ex , i : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__lowerCamelCase , keep_in_memory=__lowerCamelCase )
__UpperCAmelCase : Tuple = dset.add_faiss_index("vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT )
__UpperCAmelCase , __UpperCAmelCase : Dict = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def _lowerCamelCase ( self: List[str] ) -> int:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__UpperCAmelCase , __UpperCAmelCase : Tuple = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
__UpperCAmelCase , __UpperCAmelCase : List[Any] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(__lowerCamelCase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def _lowerCamelCase ( self: List[str] ) -> Dict:
from elasticsearch import Elasticsearch
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
__UpperCAmelCase : int = {"acknowledged": True}
            mocked_bulk.return_value = [(True, None)] * 30
__UpperCAmelCase : Dict = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
__UpperCAmelCase : Any = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=__lowerCamelCase )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: List[str] ) -> Optional[int]:
import faiss
__UpperCAmelCase : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
__UpperCAmelCase : Dict = np.zeros(5 , dtype=np.floataa )
__UpperCAmelCase : List[str] = 1
__UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(__lowerCamelCase )
self.assertRaises(__lowerCamelCase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
__UpperCAmelCase : List[str] = np.eye(5 , dtype=np.floataa )[::-1]
__UpperCAmelCase , __UpperCAmelCase : Any = index.search_batch(__lowerCamelCase )
self.assertRaises(__lowerCamelCase , index.search_batch , queries[0] )
__UpperCAmelCase : Dict = [scores[0] for scores in total_scores]
__UpperCAmelCase : int = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , __lowerCamelCase )
def _lowerCamelCase ( self: Any ) -> List[str]:
import faiss
__UpperCAmelCase : Dict = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
__UpperCAmelCase : Optional[Any] = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__lowerCamelCase ):
__UpperCAmelCase : Any = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def _lowerCamelCase ( self: List[str] ) -> Dict:
import faiss
__UpperCAmelCase : str = faiss.IndexFlat(5 )
__UpperCAmelCase : int = FaissIndex(custom_index=__lowerCamelCase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def _lowerCamelCase ( self: Union[str, Any] ) -> int:
import faiss
__UpperCAmelCase : Any = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file:
index.save(tmp_file.name )
__UpperCAmelCase : List[str] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
__UpperCAmelCase : Tuple = np.zeros(5 , dtype=np.floataa )
__UpperCAmelCase : Tuple = 1
__UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search(__lowerCamelCase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def _UpperCamelCase ( snake_case__ ) -> Optional[Any]:
import faiss
__UpperCAmelCase : Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5, dtype=np.floataa ) )
__UpperCAmelCase : Optional[Any] = "index.faiss"
__UpperCAmelCase : Optional[int] = f'''mock://{index_name}'''
index.save(snake_case__, storage_options=mockfs.storage_options )
__UpperCAmelCase : Dict = FaissIndex.load(snake_case__, storage_options=mockfs.storage_options )
__UpperCAmelCase : str = np.zeros(5, dtype=np.floataa )
__UpperCAmelCase : Any = 1
__UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(snake_case__ )
assert scores[0] > 0
assert indices[0] == 1
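# A hedged, standalone sketch of the raw faiss inner-product search that a
# wrapper like FaissIndex builds on (illustration only, not the wrapper's
# internals; assumes faiss and numpy are installed):
def _faiss_inner_product_sketch():
    import faiss
    import numpy as np

    vectors = np.eye(5, dtype=np.float32)  # five unit vectors, one per row
    index = faiss.IndexFlatIP(5)  # flat index with inner-product metric
    index.add(vectors)
    query = np.zeros((1, 5), dtype=np.float32)
    query[0, 1] = 1.0  # best match is the second stored vector
    scores, indices = index.search(query, 1)  # top-1 neighbour per query row
    assert indices[0][0] == 1 and scores[0][0] > 0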
@require_elasticsearch
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
__UpperCAmelCase : Optional[Any] = Elasticsearch()
__UpperCAmelCase : Dict = {"acknowledged": True}
__UpperCAmelCase : Any = ElasticSearchIndex(es_client=__lowerCamelCase )
            mocked_bulk.return_value = [(True, None)] * 3
index.add_documents(["foo", "bar", "foobar"] )
# single query
__UpperCAmelCase : Dict = "foo"
__UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = index.search(__lowerCamelCase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
__UpperCAmelCase : int = "foo"
__UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search(__lowerCamelCase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
__UpperCAmelCase : int = ["foo", "bar", "foobar"]
__UpperCAmelCase : Union[str, Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
__UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search_batch(__lowerCamelCase )
__UpperCAmelCase : Tuple = [scores[0] for scores in total_scores]
__UpperCAmelCase : Optional[int] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __lowerCamelCase )
# batched queries with timeout
__UpperCAmelCase : str = ["foo", "bar", "foobar"]
__UpperCAmelCase : Tuple = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search_batch(__lowerCamelCase , request_timeout=30 )
__UpperCAmelCase : Union[str, Any] = [scores[0] for scores in total_scores]
__UpperCAmelCase : List[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __lowerCamelCase )
| 342 | 1 |
import warnings
from .generation import TFGenerationMixin
class _snake_case ( _lowercase ):
# warning at import time
warnings.warn(
"Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
"be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead." , _lowercase , )
| 342 | import argparse
import struct
import unittest
class _snake_case :
def __init__( self: Tuple , __lowerCamelCase: bytes ) -> None:
__UpperCAmelCase : Tuple = data
# Initialize hash values
__UpperCAmelCase : Any = [
0x6_A_0_9_E_6_6_7,
0xB_B_6_7_A_E_8_5,
0x3_C_6_E_F_3_7_2,
0xA_5_4_F_F_5_3_A,
0x5_1_0_E_5_2_7_F,
0x9_B_0_5_6_8_8_C,
0x1_F_8_3_D_9_A_B,
0x5_B_E_0_C_D_1_9,
]
# Initialize round constants
__UpperCAmelCase : Dict = [
0x4_2_8_A_2_F_9_8,
0x7_1_3_7_4_4_9_1,
0xB_5_C_0_F_B_C_F,
0xE_9_B_5_D_B_A_5,
0x3_9_5_6_C_2_5_B,
0x5_9_F_1_1_1_F_1,
0x9_2_3_F_8_2_A_4,
0xA_B_1_C_5_E_D_5,
0xD_8_0_7_A_A_9_8,
0x1_2_8_3_5_B_0_1,
0x2_4_3_1_8_5_B_E,
0x5_5_0_C_7_D_C_3,
0x7_2_B_E_5_D_7_4,
0x8_0_D_E_B_1_F_E,
0x9_B_D_C_0_6_A_7,
0xC_1_9_B_F_1_7_4,
0xE_4_9_B_6_9_C_1,
0xE_F_B_E_4_7_8_6,
0x0_F_C_1_9_D_C_6,
0x2_4_0_C_A_1_C_C,
0x2_D_E_9_2_C_6_F,
0x4_A_7_4_8_4_A_A,
0x5_C_B_0_A_9_D_C,
0x7_6_F_9_8_8_D_A,
0x9_8_3_E_5_1_5_2,
0xA_8_3_1_C_6_6_D,
0xB_0_0_3_2_7_C_8,
0xB_F_5_9_7_F_C_7,
0xC_6_E_0_0_B_F_3,
0xD_5_A_7_9_1_4_7,
0x0_6_C_A_6_3_5_1,
0x1_4_2_9_2_9_6_7,
0x2_7_B_7_0_A_8_5,
0x2_E_1_B_2_1_3_8,
0x4_D_2_C_6_D_F_C,
0x5_3_3_8_0_D_1_3,
0x6_5_0_A_7_3_5_4,
0x7_6_6_A_0_A_B_B,
0x8_1_C_2_C_9_2_E,
0x9_2_7_2_2_C_8_5,
0xA_2_B_F_E_8_A_1,
0xA_8_1_A_6_6_4_B,
0xC_2_4_B_8_B_7_0,
0xC_7_6_C_5_1_A_3,
0xD_1_9_2_E_8_1_9,
0xD_6_9_9_0_6_2_4,
0xF_4_0_E_3_5_8_5,
0x1_0_6_A_A_0_7_0,
0x1_9_A_4_C_1_1_6,
0x1_E_3_7_6_C_0_8,
0x2_7_4_8_7_7_4_C,
0x3_4_B_0_B_C_B_5,
0x3_9_1_C_0_C_B_3,
0x4_E_D_8_A_A_4_A,
0x5_B_9_C_C_A_4_F,
0x6_8_2_E_6_F_F_3,
0x7_4_8_F_8_2_E_E,
0x7_8_A_5_6_3_6_F,
0x8_4_C_8_7_8_1_4,
0x8_C_C_7_0_2_0_8,
0x9_0_B_E_F_F_F_A,
0xA_4_5_0_6_C_E_B,
0xB_E_F_9_A_3_F_7,
0xC_6_7_1_7_8_F_2,
]
__UpperCAmelCase : List[Any] = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _lowerCamelCase ( __lowerCamelCase: bytes ) -> bytes:
__UpperCAmelCase : List[str] = B"\x80" + (B"\x00" * (63 - (len(__lowerCamelCase ) + 8) % 64))
__UpperCAmelCase : int = struct.pack(">Q" , (len(__lowerCamelCase ) * 8) )
return data + padding + big_endian_integer
def _lowerCamelCase ( self: Dict ) -> None:
# Convert into blocks of 64 bytes
__UpperCAmelCase : Dict = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
__UpperCAmelCase : List[str] = list(struct.unpack(">16L" , __lowerCamelCase ) )
# add 48 0-ed integers
words += [0] * 48
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
__UpperCAmelCase : Union[str, Any] = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
__UpperCAmelCase : str = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
                    __UpperCAmelCase : Union[str, Any] = (
                        words[index - 16] + sa + words[index - 7] + sa
                    ) % 0x1_0_0_0_0_0_0_0_0  # standard SHA-256: w[i] = (w[i-16] + s0 + w[i-7] + s1) mod 2**32; s0 and s1 are both rendered as `sa` above
# Compression
__UpperCAmelCase : Union[str, Any] = self.ror(__lowerCamelCase , 6 ) ^ self.ror(__lowerCamelCase , 11 ) ^ self.ror(__lowerCamelCase , 25 )
__UpperCAmelCase : Tuple = (e & f) ^ ((~e & 0xF_F_F_F_F_F_F_F) & g)
__UpperCAmelCase : int = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_0_0_0_0_0_0_0_0
__UpperCAmelCase : List[Any] = self.ror(__lowerCamelCase , 2 ) ^ self.ror(__lowerCamelCase , 13 ) ^ self.ror(__lowerCamelCase , 22 )
__UpperCAmelCase : Dict = (a & b) ^ (a & c) ^ (b & c)
__UpperCAmelCase : int = (sa + maj) % 0x1_0_0_0_0_0_0_0_0
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = (
g,
f,
e,
((d + tempa) % 0x1_0_0_0_0_0_0_0_0),
c,
b,
a,
                    ((tempa + tempa) % 0x1_0_0_0_0_0_0_0_0),  # standard SHA-256: a = (temp1 + temp2) mod 2**32; both temporaries are rendered as `tempa` here
)
__UpperCAmelCase : Optional[int] = [a, b, c, d, e, f, g, h]
# Modify final values
__UpperCAmelCase : List[str] = [
((element + mutated_hash_values[index]) % 0x1_0_0_0_0_0_0_0_0)
for index, element in enumerate(self.hashes )
]
__UpperCAmelCase : int = "".join([hex(__lowerCamelCase )[2:].zfill(8 ) for value in self.hashes] )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: int ) -> int:
return 0xF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations)
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: List[Any] ) -> None:
import hashlib
__UpperCAmelCase : Dict = bytes("Test String" , "utf-8" )
self.assertEqual(SHAaaa(__lowerCamelCase ).hash , hashlib.shaaaa(__lowerCamelCase ).hexdigest() )
def _UpperCamelCase ( ) -> None:
import doctest
doctest.testmod()
__UpperCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
parser.add_argument(
"-f", "--file", dest="input_file", help="Hash contents of a file" )
__UpperCAmelCase : List[Any] = parser.parse_args()
__UpperCAmelCase : Optional[int] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file, "rb" ) as f:
__UpperCAmelCase : List[str] = f.read()
else:
__UpperCAmelCase : List[Any] = bytes(snake_case__, "utf-8" )
print(SHAaaa(snake_case__ ).hash )
if __name__ == "__main__":
main()
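# The masked names above reuse `sa` for both SHA-256 sigma values and `tempa`
# for both temporaries, so here is a minimal, unambiguous sketch of one
# compression step with the standard variable names (32-bit words assumed):
def _ror32(value: int, rotations: int) -> int:
    return 0xFFFFFFFF & ((value << (32 - rotations)) | (value >> rotations))

def _sha256_compress_step(state, k, w):
    # One round: state is (a..h), k the round constant, w the schedule word.
    a, b, c, d, e, f, g, h = state
    s1 = _ror32(e, 6) ^ _ror32(e, 11) ^ _ror32(e, 25)
    ch = (e & f) ^ (~e & 0xFFFFFFFF & g)
    temp1 = (h + s1 + ch + k + w) % 2**32
    s0 = _ror32(a, 2) ^ _ror32(a, 13) ^ _ror32(a, 22)
    maj = (a & b) ^ (a & c) ^ (b & c)
    temp2 = (s0 + maj) % 2**32
    return ((temp1 + temp2) % 2**32, a, b, c, (d + temp1) % 2**32, e, f, g)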
| 342 | 1 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_snake_case = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_snake_case = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against one or more human-produced reference summaries or translations.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around the Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_snake_case = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def _lowerCamelCase ( self: Any ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"] , reference_urls=[
"https://en.wikipedia.org/wiki/ROUGE_(metric)",
"https://github.com/google-research/google-research/tree/master/rouge",
] , )
def _lowerCamelCase ( self: Any , __lowerCamelCase: str , __lowerCamelCase: List[Any] , __lowerCamelCase: Optional[int]=None , __lowerCamelCase: List[str]=True , __lowerCamelCase: List[str]=False ) -> Union[str, Any]:
if rouge_types is None:
__UpperCAmelCase : int = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
__UpperCAmelCase : Optional[Any] = rouge_scorer.RougeScorer(rouge_types=__lowerCamelCase , use_stemmer=__lowerCamelCase )
if use_aggregator:
__UpperCAmelCase : Tuple = scoring.BootstrapAggregator()
else:
__UpperCAmelCase : int = []
for ref, pred in zip(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Dict = scorer.score(__lowerCamelCase , __lowerCamelCase )
if use_aggregator:
aggregator.add_scores(__lowerCamelCase )
else:
scores.append(__lowerCamelCase )
if use_aggregator:
__UpperCAmelCase : str = aggregator.aggregate()
else:
__UpperCAmelCase : Optional[int] = {}
for key in scores[0]:
__UpperCAmelCase : Dict = [score[key] for score in scores]
return result
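# A hedged usage sketch of the non-aggregated path above: with
# use_aggregator=False the metric returns one rouge_score Score tuple per
# example instead of bootstrap aggregates (assumes `rouge` was loaded as in
# the docstring example):
# >>> results = rouge.compute(
# ...     predictions=["hello there", "general kenobi"],
# ...     references=["hello there", "general kenobi"],
# ...     use_aggregator=False,
# ... )
# >>> results["rouge1"][0].fmeasure
# 1.0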
| 342 | import numpy as np
import datasets
_snake_case = '''
Compute the Mahalanobis Distance
Mahalanobis distance is the distance between a point and a distribution,
not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
_snake_case = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
_snake_case = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalanobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def _lowerCamelCase ( self: List[str] ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: Union[str, Any] ) -> List[str]:
# convert to numpy arrays
__UpperCAmelCase : int = np.array(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = np.array(__lowerCamelCase )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("Expected `X` to be a 2D vector" )
if len(reference_distribution.shape ) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
# Get mahalanobis distance for each prediction
__UpperCAmelCase : str = X - np.mean(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = np.cov(reference_distribution.T )
try:
__UpperCAmelCase : int = np.linalg.inv(__lowerCamelCase )
except np.linalg.LinAlgError:
__UpperCAmelCase : Optional[int] = np.linalg.pinv(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = np.dot(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Optional[int] = np.dot(__lowerCamelCase , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 342 | 1 |
from ..utils import DummyObject, requires_backends
class _snake_case ( metaclass=_lowercase ):
lowerCamelCase__: str = ["onnx"]
def __init__( self: Optional[Any] , *__lowerCamelCase: List[str] , **__lowerCamelCase: Optional[Any] ) -> Tuple:
requires_backends(self , ["onnx"] )
@classmethod
def _lowerCamelCase ( cls: str , *__lowerCamelCase: Any , **__lowerCamelCase: Any ) -> Tuple:
requires_backends(cls , ["onnx"] )
@classmethod
def _lowerCamelCase ( cls: Dict , *__lowerCamelCase: Union[str, Any] , **__lowerCamelCase: List[str] ) -> Tuple:
requires_backends(cls , ["onnx"] )
| 342 | import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class _snake_case ( unittest.TestCase ):
def __init__( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict=13 , __lowerCamelCase: List[str]=7 , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: int=True , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Tuple=99 , __lowerCamelCase: List[str]=32 , __lowerCamelCase: Optional[Any]=5 , __lowerCamelCase: List[str]=4 , __lowerCamelCase: str=37 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: int=0.1 , __lowerCamelCase: Optional[Any]=0.1 , __lowerCamelCase: Tuple=5_12 , __lowerCamelCase: int=16 , __lowerCamelCase: str=2 , __lowerCamelCase: Optional[Any]=0.02 , __lowerCamelCase: Optional[Any]=4 , ) -> str:
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : Optional[int] = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[str] = use_attention_mask
__UpperCAmelCase : Dict = use_token_type_ids
__UpperCAmelCase : Optional[int] = use_labels
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : Dict = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : Tuple = intermediate_size
__UpperCAmelCase : Union[str, Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : str = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = max_position_embeddings
__UpperCAmelCase : Optional[int] = type_vocab_size
__UpperCAmelCase : str = type_sequence_label_size
__UpperCAmelCase : Tuple = initializer_range
__UpperCAmelCase : str = num_choices
def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
__UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : str = None
if self.use_attention_mask:
__UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : Any = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__lowerCamelCase , )
return config, input_ids, attention_mask
def _lowerCamelCase ( self: str ) -> Any:
__UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = config_and_inputs
__UpperCAmelCase : Any = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: str = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def _lowerCamelCase ( self: List[Any] ) -> Dict:
__UpperCAmelCase : Union[str, Any] = FlaxDistilBertModelTester(self )
@slow
def _lowerCamelCase ( self: Tuple ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
__UpperCAmelCase : Optional[int] = model_class_name.from_pretrained("distilbert-base-uncased" )
__UpperCAmelCase : Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(__lowerCamelCase )
@require_flax
class _snake_case ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self: int ) -> List[Any]:
__UpperCAmelCase : Dict = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased" )
__UpperCAmelCase : Any = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__UpperCAmelCase : Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__UpperCAmelCase : int = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
__UpperCAmelCase : str = (1, 11, 7_68)
self.assertEqual(output.shape , __lowerCamelCase )
__UpperCAmelCase : Optional[int] = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1e-4 ) )
| 342 | 1 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
_snake_case = ['''small''', '''medium''', '''large''']
_snake_case = '''lm_head.decoder.weight'''
_snake_case = '''lm_head.weight'''
def _UpperCamelCase ( snake_case__, snake_case__ ) -> List[Any]:
__UpperCAmelCase : str = torch.load(snake_case__ )
__UpperCAmelCase : str = d.pop(snake_case__ )
os.makedirs(snake_case__, exist_ok=snake_case__ )
torch.save(snake_case__, os.path.join(snake_case__, snake_case__ ) )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
_snake_case = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
_snake_case = os.path.join(args.dialogpt_path, F'{MODEL}_ft.pkl')
_snake_case = F'./DialoGPT-{MODEL}'
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 342 | import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
_snake_case = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
_snake_case = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
_snake_case = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
_snake_case = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
_snake_case = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Any:
for tf_name, hf_name in patterns:
        __UpperCAmelCase : Optional[int] = k.replace(tf_name, hf_name )
return k
def _UpperCamelCase ( snake_case__, snake_case__ ) -> BigBirdPegasusForConditionalGeneration:
__UpperCAmelCase : Dict = BigBirdPegasusConfig(**snake_case__ )
__UpperCAmelCase : Dict = BigBirdPegasusForConditionalGeneration(snake_case__ )
__UpperCAmelCase : Optional[Any] = torch_model.state_dict()
__UpperCAmelCase : Optional[int] = {}
# separating decoder weights
__UpperCAmelCase : List[Any] = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder" )}
__UpperCAmelCase : str = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder" )}
for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion" ):
        __UpperCAmelCase : Optional[int] = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(snake_case__ ):
continue
__UpperCAmelCase : List[str] = DECODER_PATTERNS
__UpperCAmelCase : str = rename_state_dict_key(snake_case__, snake_case__ )
if new_k not in state_dict:
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if any(i in k for i in ["dense", "query", "key", "value"] ):
__UpperCAmelCase : Optional[int] = v.T
__UpperCAmelCase : str = torch.from_numpy(snake_case__ )
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion" ):
        __UpperCAmelCase : int = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(snake_case__ ):
continue
__UpperCAmelCase : Optional[Any] = REMAINING_PATTERNS
__UpperCAmelCase : Optional[int] = rename_state_dict_key(snake_case__, snake_case__ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if any(i in k for i in ["dense", "query", "key", "value"] ):
__UpperCAmelCase : List[Any] = v.T
__UpperCAmelCase : List[str] = torch.from_numpy(snake_case__ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
__UpperCAmelCase : List[Any] = mapping["model.embed_positions.weight"]
__UpperCAmelCase : Optional[Any] = mapping.pop("model.embed_positions.weight" )
__UpperCAmelCase , __UpperCAmelCase : Any = torch_model.load_state_dict(snake_case__, strict=snake_case__ )
__UpperCAmelCase : str = [
k
for k in missing
if k
not in [
"final_logits_bias",
"model.encoder.embed_tokens.weight",
"model.decoder.embed_tokens.weight",
"lm_head.weight",
]
]
assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], f'''no matches found for the following tf keys {extra}'''
return torch_model
def _UpperCamelCase ( snake_case__ ) -> Dict:
__UpperCAmelCase : Tuple = tf.train.list_variables(snake_case__ )
__UpperCAmelCase : List[str] = {}
__UpperCAmelCase : str = ["global_step"]
for name, shape in tqdm(snake_case__, desc="converting tf checkpoint to dict" ):
__UpperCAmelCase : Tuple = any(pat in name for pat in ignore_name )
if skip_key:
continue
__UpperCAmelCase : Optional[Any] = tf.train.load_variable(snake_case__, snake_case__ )
__UpperCAmelCase : Tuple = array
return tf_weights
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Dict:
__UpperCAmelCase : str = get_tf_weights_as_numpy(snake_case__ )
__UpperCAmelCase : List[Any] = convert_bigbird_pegasus(snake_case__, snake_case__ )
torch_model.save_pretrained(snake_case__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
_snake_case = parser.parse_args()
_snake_case = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
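# A minimal illustration of the rename_state_dict_key logic above: apply the
# (tf_name, hf_name) substitutions in order. The key below is hypothetical;
# the patterns are a subset of INIT_COMMON:
def _rename_sketch():
    patterns = [("/", "."), ("layer_", "layers."), ("kernel", "weight"), ("pegasus", "model")]
    k = "pegasus/decoder/layer_0/kernel"
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    assert k == "model.decoder.layers.0.weight"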
| 342 | 1 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
_snake_case = random.Random()
def _UpperCamelCase ( snake_case__, snake_case__=1.0, snake_case__=None, snake_case__=None ) -> Optional[Any]:
if rng is None:
__UpperCAmelCase : int = global_rng
__UpperCAmelCase : Optional[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class _snake_case ( unittest.TestCase ):
def __init__( self: List[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[int]=7 , __lowerCamelCase: List[str]=4_00 , __lowerCamelCase: int=20_00 , __lowerCamelCase: List[str]=10 , __lowerCamelCase: Any=1_60 , __lowerCamelCase: int=8 , __lowerCamelCase: Dict=0.0 , __lowerCamelCase: Optional[int]=40_00 , __lowerCamelCase: Tuple=False , __lowerCamelCase: Optional[int]=True , ) -> Union[str, Any]:
__UpperCAmelCase : Dict = parent
__UpperCAmelCase : Optional[Any] = batch_size
__UpperCAmelCase : Optional[Any] = min_seq_length
__UpperCAmelCase : int = max_seq_length
__UpperCAmelCase : Any = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__UpperCAmelCase : Dict = padding_value
__UpperCAmelCase : Optional[Any] = sampling_rate
__UpperCAmelCase : Union[str, Any] = return_attention_mask
__UpperCAmelCase : List[str] = do_normalize
__UpperCAmelCase : List[str] = feature_size
__UpperCAmelCase : Optional[Any] = chunk_length
__UpperCAmelCase : Dict = hop_length
def _lowerCamelCase ( self: Dict ) -> List[str]:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Optional[Any]=False , __lowerCamelCase: List[str]=False ) -> List[Any]:
def _flatten(__lowerCamelCase: Optional[int] ):
return list(itertools.chain(*__lowerCamelCase ) )
if equal_length:
__UpperCAmelCase : str = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__UpperCAmelCase : Tuple = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
            __UpperCAmelCase : Tuple = [np.asarray(x ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: Union[str, Any] = WhisperFeatureExtractor if is_speech_available() else None
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
__UpperCAmelCase : List[str] = WhisperFeatureExtractionTester(self )
def _lowerCamelCase ( self: Tuple ) -> Any:
__UpperCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : str = feat_extract_first.save_pretrained(__lowerCamelCase )[0]
check_json_file_has_correct_format(__lowerCamelCase )
__UpperCAmelCase : str = self.feature_extraction_class.from_pretrained(__lowerCamelCase )
__UpperCAmelCase : Any = feat_extract_first.to_dict()
__UpperCAmelCase : int = feat_extract_second.to_dict()
__UpperCAmelCase : Union[str, Any] = feat_extract_first.mel_filters
__UpperCAmelCase : Dict = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( self: Union[str, Any] ) -> Dict:
__UpperCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : str = os.path.join(__lowerCamelCase , "feat_extract.json" )
feat_extract_first.to_json_file(__lowerCamelCase )
__UpperCAmelCase : List[Any] = self.feature_extraction_class.from_json_file(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = feat_extract_first.to_dict()
__UpperCAmelCase : int = feat_extract_second.to_dict()
__UpperCAmelCase : Union[str, Any] = feat_extract_first.mel_filters
__UpperCAmelCase : Tuple = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( self: Optional[int] ) -> int:
# Tests that all call wrap to encode_plus and batch_encode_plus
__UpperCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__UpperCAmelCase : List[str] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
        __UpperCAmelCase : Union[str, Any] = [np.asarray(speech_input ) for speech_input in speech_inputs]
# Test feature size
__UpperCAmelCase : Optional[Any] = feature_extractor(__lowerCamelCase , padding="max_length" , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
__UpperCAmelCase : List[Any] = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
__UpperCAmelCase : Dict = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) )
# Test batched
__UpperCAmelCase : Union[str, Any] = feature_extractor(__lowerCamelCase , return_tensors="np" ).input_features
__UpperCAmelCase : Optional[Any] = feature_extractor(__lowerCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
__UpperCAmelCase : int = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
__UpperCAmelCase : Union[str, Any] = np.asarray(__lowerCamelCase )
__UpperCAmelCase : str = feature_extractor(__lowerCamelCase , return_tensors="np" ).input_features
__UpperCAmelCase : Tuple = feature_extractor(__lowerCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) )
# Test truncation required
__UpperCAmelCase : Dict = [floats_list((1, x) )[0] for x in range(2_00 , (feature_extractor.n_samples + 5_00) , 2_00 )]
        __UpperCAmelCase : Dict = [np.asarray(speech_input ) for speech_input in speech_inputs]
__UpperCAmelCase : List[Any] = [x[: feature_extractor.n_samples] for x in speech_inputs]
        __UpperCAmelCase : Dict = [np.asarray(speech_input ) for speech_input in speech_inputs_truncated]
__UpperCAmelCase : Union[str, Any] = feature_extractor(__lowerCamelCase , return_tensors="np" ).input_features
__UpperCAmelCase : Tuple = feature_extractor(__lowerCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) )
def _lowerCamelCase ( self: Tuple ) -> str:
import torch
__UpperCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCAmelCase : str = np.random.rand(1_00 , 32 ).astype(np.floataa )
__UpperCAmelCase : List[str] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__UpperCAmelCase : List[Any] = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__UpperCAmelCase : str = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def _lowerCamelCase ( self: str , __lowerCamelCase: str ) -> Any:
__UpperCAmelCase : int = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
__UpperCAmelCase : Optional[Any] = ds.sort("id" ).select(range(__lowerCamelCase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def _lowerCamelCase ( self: Dict ) -> Tuple:
# fmt: off
__UpperCAmelCase : int = torch.tensor(
[
0.11_93, -0.09_46, -0.10_98, -0.01_96, 0.02_25, -0.06_90, -0.17_36, 0.09_51,
0.09_71, -0.08_17, -0.07_02, 0.01_62, 0.02_60, 0.00_17, -0.01_92, -0.16_78,
0.07_09, -0.18_67, -0.06_55, -0.02_74, -0.02_34, -0.18_84, -0.05_16, -0.05_54,
-0.02_74, -0.14_25, -0.14_23, 0.08_37, 0.03_77, -0.08_54
] )
# fmt: on
__UpperCAmelCase : Any = self._load_datasamples(1 )
__UpperCAmelCase : Dict = WhisperFeatureExtractor()
__UpperCAmelCase : str = feature_extractor(__lowerCamelCase , return_tensors="pt" ).input_features
self.assertEqual(input_features.shape , (1, 80, 30_00) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , __lowerCamelCase , atol=1e-4 ) )
def _lowerCamelCase ( self: Tuple ) -> Union[str, Any]:
__UpperCAmelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCAmelCase : List[Any] = self._load_datasamples(1 )[0]
__UpperCAmelCase : Union[str, Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_55_35 # Rescale to [0, 65535] to show issue
__UpperCAmelCase : Optional[int] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__lowerCamelCase )[0]
self.assertTrue(np.all(np.mean(__lowerCamelCase ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(__lowerCamelCase ) - 1 ) < 1e-3 ) )
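# A rough numpy equivalent of the zero_mean_unit_var_norm behaviour that the
# last test checks; the real method also honours attention masks and padding,
# so this is only a sketch of the unmasked case:
def _zero_mean_unit_var_sketch():
    import numpy as np

    audio = np.random.rand(16_000) * 65_535.0  # rescaled input, as in the test
    normed = (audio - audio.mean()) / np.sqrt(audio.var() + 1e-7)
    assert abs(normed.mean()) < 1e-3 and abs(normed.var() - 1.0) < 1e-3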
| 342 | import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class _snake_case ( _lowercase ):
lowerCamelCase__: Any = ["image_processor", "tokenizer"]
lowerCamelCase__: Optional[Any] = "BlipImageProcessor"
lowerCamelCase__: Optional[int] = "AutoTokenizer"
def __init__( self: List[str] , __lowerCamelCase: str , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[Any] ) -> Dict:
super().__init__(__lowerCamelCase , __lowerCamelCase )
# add QFormer tokenizer
__UpperCAmelCase : Dict = qformer_tokenizer
def __call__( self: Any , __lowerCamelCase: ImageInput = None , __lowerCamelCase: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCamelCase: bool = True , __lowerCamelCase: Union[bool, str, PaddingStrategy] = False , __lowerCamelCase: Union[bool, str, TruncationStrategy] = None , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: int = 0 , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = True , __lowerCamelCase: Optional[Union[str, TensorType]] = None , **__lowerCamelCase: Dict , ) -> BatchFeature:
if images is None and text is None:
raise ValueError("You have to specify at least images or text." )
__UpperCAmelCase : str = BatchFeature()
if text is not None:
__UpperCAmelCase : Any = self.tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
encoding.update(__lowerCamelCase )
__UpperCAmelCase : Dict = self.qformer_tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
__UpperCAmelCase : int = qformer_text_encoding.pop("input_ids" )
__UpperCAmelCase : Optional[int] = qformer_text_encoding.pop("attention_mask" )
if images is not None:
__UpperCAmelCase : Union[str, Any] = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase )
encoding.update(__lowerCamelCase )
return encoding
def _lowerCamelCase ( self: Any , *__lowerCamelCase: Any , **__lowerCamelCase: Any ) -> Optional[Any]:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( self: Tuple , *__lowerCamelCase: Any , **__lowerCamelCase: Dict ) -> Tuple:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _lowerCamelCase ( self: List[str] ) -> Tuple:
__UpperCAmelCase : str = self.tokenizer.model_input_names
__UpperCAmelCase : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Union[str, Any] , **__lowerCamelCase: Optional[Any] ) -> str:
if os.path.isfile(__lowerCamelCase ):
raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
__UpperCAmelCase : List[str] = os.path.join(__lowerCamelCase , "qformer_tokenizer" )
self.qformer_tokenizer.save_pretrained(__lowerCamelCase )
return super().save_pretrained(__lowerCamelCase , **__lowerCamelCase )
@classmethod
def _lowerCamelCase ( cls: Tuple , __lowerCamelCase: Tuple , **__lowerCamelCase: Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(__lowerCamelCase , subfolder="qformer_tokenizer" )
__UpperCAmelCase : List[Any] = cls._get_arguments_from_pretrained(__lowerCamelCase , **__lowerCamelCase )
args.append(__lowerCamelCase )
return cls(*__lowerCamelCase )
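# The class name is masked above; assuming it corresponds to transformers'
# InstructBlipProcessor, a hedged usage sketch (checkpoint and prompt are
# illustrative only):
# >>> from PIL import Image
# >>> from transformers import InstructBlipProcessor
# >>> processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
# >>> inputs = processor(images=Image.new("RGB", (224, 224)), text="What is shown?", return_tensors="pt")
# >>> sorted(inputs.keys())  # pixel_values plus both tokenizations, including
# ...                        # qformer_input_ids / qformer_attention_mask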
| 342 | 1 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> List[str]:
# Initialise PyTorch model
__UpperCAmelCase : List[Any] = MobileBertConfig.from_json_file(snake_case__ )
print(f'''Building PyTorch model from configuration: {config}''' )
__UpperCAmelCase : Optional[Any] = MobileBertForPreTraining(snake_case__ )
# Load weights from tf checkpoint
__UpperCAmelCase : Union[str, Any] = load_tf_weights_in_mobilebert(snake_case__, snake_case__, snake_case__ )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict(), snake_case__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_snake_case = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 342 | import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
_snake_case = {
    '''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
    '''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
    '''tokenizer_config_file''': {
        '''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
_snake_case = {'''facebook/blenderbot-3B''': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _UpperCamelCase ( ) -> Dict:
__UpperCAmelCase : Tuple = (
list(range(ord("!" ), ord("~" ) + 1 ) ) + list(range(ord("¡" ), ord("¬" ) + 1 ) ) + list(range(ord("®" ), ord("ÿ" ) + 1 ) )
)
__UpperCAmelCase : str = bs[:]
__UpperCAmelCase : Any = 0
for b in range(2**8 ):
if b not in bs:
            bs.append(b )
cs.append(2**8 + n )
n += 1
    __UpperCAmelCase : Optional[Any] = [chr(n ) for n in cs]
return dict(zip(snake_case__, snake_case__ ) )
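# A round-trip sketch of the byte<->unicode table built above, calling the
# masked helper by its original transformers name, bytes_to_unicode:
def _byte_level_roundtrip_sketch():
    byte_encoder = bytes_to_unicode()  # maps all 256 bytes to printable chars
    assert len(byte_encoder) == 256
    byte_decoder = {v: k for k, v in byte_encoder.items()}
    token = "".join(byte_encoder[b] for b in "héllo".encode("utf-8"))
    assert bytearray(byte_decoder[c] for c in token).decode("utf-8") == "héllo"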
def _UpperCamelCase ( snake_case__ ) -> Any:
__UpperCAmelCase : List[Any] = set()
__UpperCAmelCase : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__UpperCAmelCase : Union[str, Any] = char
return pairs
class _snake_case ( _lowercase ):
lowerCamelCase__: str = VOCAB_FILES_NAMES
lowerCamelCase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__: Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__: Dict = ["input_ids", "attention_mask"]
def __init__( self: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]="replace" , __lowerCamelCase: List[str]="<s>" , __lowerCamelCase: List[str]="</s>" , __lowerCamelCase: str="</s>" , __lowerCamelCase: Tuple="<s>" , __lowerCamelCase: Optional[int]="<unk>" , __lowerCamelCase: Any="<pad>" , __lowerCamelCase: List[str]="<mask>" , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int , ) -> List[str]:
__UpperCAmelCase : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
__UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
__UpperCAmelCase : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
__UpperCAmelCase : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
__UpperCAmelCase : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
__UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase : Dict = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , )
with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
__UpperCAmelCase : List[Any] = json.load(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = {v: k for k, v in self.encoder.items()}
__UpperCAmelCase : Dict = errors # how to handle errors in decoding
__UpperCAmelCase : Optional[int] = bytes_to_unicode()
__UpperCAmelCase : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle:
__UpperCAmelCase : List[Any] = merges_handle.read().split("\n" )[1:-1]
__UpperCAmelCase : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
__UpperCAmelCase : int = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
__UpperCAmelCase : List[Any] = {}
__UpperCAmelCase : Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__UpperCAmelCase : int = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def _lowerCamelCase ( self: Dict ) -> Any:
return len(self.encoder )
def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
return dict(self.encoder , **self.added_tokens_encoder )
def _lowerCamelCase ( self: int , __lowerCamelCase: List[Any] ) -> Union[str, Any]:
if token in self.cache:
return self.cache[token]
__UpperCAmelCase : List[Any] = tuple(__lowerCamelCase )
__UpperCAmelCase : Dict = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
__UpperCAmelCase : Optional[int] = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = bigram
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : str = 0
while i < len(__lowerCamelCase ):
try:
__UpperCAmelCase : Union[str, Any] = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCAmelCase : Union[str, Any] = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCAmelCase : List[Any] = tuple(__lowerCamelCase )
__UpperCAmelCase : str = new_word
if len(__lowerCamelCase ) == 1:
break
else:
__UpperCAmelCase : Optional[Any] = get_pairs(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = " ".join(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = word
return word
def _lowerCamelCase ( self: Dict , __lowerCamelCase: Optional[Any] ) -> Dict:
__UpperCAmelCase : Any = []
for token in re.findall(self.pat , __lowerCamelCase ):
__UpperCAmelCase : int = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) )
return bpe_tokens
def _lowerCamelCase ( self: int , __lowerCamelCase: str ) -> Dict:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[Any] ) -> List[str]:
return self.decoder.get(__lowerCamelCase )
def _lowerCamelCase ( self: Any , __lowerCamelCase: Any ) -> int:
__UpperCAmelCase : Dict = "".join(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCAmelCase : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__UpperCAmelCase : Dict = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" )
__UpperCAmelCase : Optional[Any] = 0
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
__UpperCAmelCase : Optional[Any] = token_index
writer.write(" ".join(__lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def _lowerCamelCase ( self: Dict , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None , __lowerCamelCase: bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase : int = [self.sep_token_id]
__UpperCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCamelCase ( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int ) -> List[Any]:
__UpperCAmelCase : Optional[Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()):
__UpperCAmelCase : Optional[Any] = " " + text
return (text, kwargs)
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[str]:
return token_ids_a + [self.eos_token_id]
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: "Conversation" ) -> List[int]:
__UpperCAmelCase : Tuple = []
for is_user, text in conversation.iter_texts():
if is_user:
                # Blenderbot prefixes user inputs with a space internally, so do the same here
inputs.append(" " + text )
else:
                # Generated responses already include the leading space.
inputs.append(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = " ".join(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.encode(__lowerCamelCase )
if len(__lowerCamelCase ) > self.model_max_length:
__UpperCAmelCase : List[Any] = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
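# A de-obfuscated sketch of the greedy BPE merge loop implemented in the class
# above, with illustrative names only; it assumes `bpe_ranks` maps (left, right)
# string pairs to merge priority (lower rank = merged earlier), just like the
# rank table built in __init__.
def bpe_merge_sketch(token: str, bpe_ranks: dict) -> str:
    word = tuple(token)
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        # Pick the adjacent pair with the best (lowest) merge rank.
        bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                new_word.append(first + second)  # merge the pair into one symbol
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
    return " ".join(word)
# e.g. bpe_merge_sketch("hello", {("h", "e"): 0, ("he", "l"): 1}) -> "hel l o"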
| 342 | 1 |
from collections.abc import Callable
import numpy as np
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ ) -> np.array:
__UpperCAmelCase : List[str] = int(np.ceil((x_end - xa) / step_size ) )
__UpperCAmelCase : Any = np.zeros((n + 1,) )
__UpperCAmelCase : Any = ya
__UpperCAmelCase : Optional[int] = xa
for k in range(snake_case__ ):
__UpperCAmelCase : int = y[k] + step_size * ode_func(snake_case__, y[k] )
__UpperCAmelCase : Tuple = y[k] + (
(step_size / 2) * (ode_func(snake_case__, y[k] ) + ode_func(x + step_size, snake_case__ ))
)
x += step_size
return y
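# A clean sketch of the two-stage update performed in the loop above (Heun's
# method, a.k.a. the improved Euler / explicit trapezoidal rule). Names are
# illustrative; `f` is the right-hand side of dy/dx = f(x, y).
def heun_step(f, x: float, y: float, h: float) -> float:
    y_pred = y + h * f(x, y)  # explicit Euler predictor
    return y + (h / 2) * (f(x, y) + f(x + h, y_pred))  # trapezoidal corrector
# e.g. one step of dy/dx = y from (0, 1) with h = 0.1:
# heun_step(lambda x, y: y, 0.0, 1.0, 0.1) -> 1.105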
if __name__ == "__main__":
import doctest
doctest.testmod()
| 342 | import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: List[Any] = CanineTokenizer
lowerCamelCase__: Optional[int] = False
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
super().setUp()
__UpperCAmelCase : Tuple = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowerCamelCase ( self: Union[str, Any] ) -> List[Any]:
return CanineTokenizer.from_pretrained("google/canine-s" )
def _lowerCamelCase ( self: Any , **__lowerCamelCase: List[Any] ) -> CanineTokenizer:
__UpperCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
__UpperCAmelCase : Optional[int] = 10_24
return tokenizer
@require_torch
def _lowerCamelCase ( self: List[str] ) -> int:
__UpperCAmelCase : Union[str, Any] = self.canine_tokenizer
__UpperCAmelCase : List[str] = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
__UpperCAmelCase : Dict = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
# fmt: on
__UpperCAmelCase : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Optional[Any] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
__UpperCAmelCase : Optional[Any] = self.canine_tokenizer
__UpperCAmelCase : Dict = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
__UpperCAmelCase : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , __lowerCamelCase )
self.assertIn("attention_mask" , __lowerCamelCase )
self.assertIn("token_type_ids" , __lowerCamelCase )
@require_torch
def _lowerCamelCase ( self: Any ) -> List[str]:
__UpperCAmelCase : Optional[Any] = self.canine_tokenizer
__UpperCAmelCase : int = [
"What's the weater?",
"It's about 25 degrees.",
]
__UpperCAmelCase : List[Any] = tokenizer(
text_target=__lowerCamelCase , max_length=32 , padding="max_length" , truncation=__lowerCamelCase , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
# safety check on max_len default value so we are sure the test works
__UpperCAmelCase : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__UpperCAmelCase : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCAmelCase : int = tempfile.mkdtemp()
__UpperCAmelCase : List[Any] = " He is very happy, UNwant\u00E9d,running"
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
__UpperCAmelCase : Tuple = tokenizer.__class__.from_pretrained(__lowerCamelCase )
__UpperCAmelCase : Dict = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
shutil.rmtree(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCAmelCase : List[Any] = tempfile.mkdtemp()
__UpperCAmelCase : Optional[int] = " He is very happy, UNwant\u00E9d,running"
__UpperCAmelCase : str = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
__UpperCAmelCase : Tuple = chr(0xE_0_0_7 )
additional_special_tokens.append(__lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
__UpperCAmelCase : Optional[int] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
__UpperCAmelCase : str = tokenizer.__class__.from_pretrained(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertIn(__lowerCamelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__UpperCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(__lowerCamelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__lowerCamelCase )
def _lowerCamelCase ( self: str ) -> Optional[int]:
__UpperCAmelCase : List[Any] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.get_clean_sequence(__lowerCamelCase )
# a special token for Canine can be defined as follows:
__UpperCAmelCase : int = 0xE_0_0_5
__UpperCAmelCase : Tuple = chr(__lowerCamelCase )
tokenizer.add_special_tokens({"cls_token": special_token} )
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , 1 )
__UpperCAmelCase : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : Dict = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : int = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , input_encoded + special_token_id )
__UpperCAmelCase : Optional[int] = tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
self.assertTrue(special_token not in decoded )
def _lowerCamelCase ( self: Optional[int] ) -> Optional[Any]:
__UpperCAmelCase : List[str] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : Optional[int] = chr(0xE_0_0_5 )
__UpperCAmelCase : List[str] = chr(0xE_0_0_6 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__lowerCamelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
__UpperCAmelCase : Tuple = tokenizer.tokenize(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , 1 )
self.assertEqual(len(__lowerCamelCase ) , 1 )
self.assertEqual(token_a[0] , __lowerCamelCase )
self.assertEqual(token_a[0] , __lowerCamelCase )
@require_tokenizers
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
__UpperCAmelCase : Any = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
__UpperCAmelCase : Union[str, Any] = 0xE_0_0_6
__UpperCAmelCase : int = chr(__lowerCamelCase )
__UpperCAmelCase : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__lowerCamelCase )
tokenizer.from_pretrained(__lowerCamelCase )
def _lowerCamelCase ( self: Dict ) -> List[str]:
__UpperCAmelCase : str = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
__UpperCAmelCase : Tuple = json.load(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
__UpperCAmelCase : Optional[int] = json.load(__lowerCamelCase )
# a special token for Canine can be defined as follows:
__UpperCAmelCase : Any = 0xE_0_0_6
__UpperCAmelCase : Union[str, Any] = chr(__lowerCamelCase )
__UpperCAmelCase : Dict = [new_token_a]
__UpperCAmelCase : int = [new_token_a]
with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__UpperCAmelCase : List[str] = tokenizer_class.from_pretrained(__lowerCamelCase , extra_ids=0 )
self.assertIn(__lowerCamelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
__UpperCAmelCase : List[Any] = 0xE_0_0_7
__UpperCAmelCase : List[Any] = chr(__lowerCamelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__UpperCAmelCase : str = [AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase )]
__UpperCAmelCase : Dict = tokenizer_class.from_pretrained(
__lowerCamelCase , additional_special_tokens=__lowerCamelCase , extra_ids=0 )
self.assertIn(__lowerCamelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : Optional[int] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : int = "hello world"
if self.space_between_special_tokens:
__UpperCAmelCase : Any = "[CLS] hello world [SEP]"
else:
__UpperCAmelCase : Union[str, Any] = input
__UpperCAmelCase : List[Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : Any = tokenizer.decode(__lowerCamelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__lowerCamelCase , [output, output.lower()] )
def _lowerCamelCase ( self: Dict ) -> Any:
__UpperCAmelCase : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : List[str] = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
__UpperCAmelCase : List[str] = "a"
__UpperCAmelCase : Any = ord(__lowerCamelCase )
for attr in attributes_list:
setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase )
setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase )
setattr(__lowerCamelCase , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [] )
__UpperCAmelCase : Tuple = 0xE_0_0_6
__UpperCAmelCase : Optional[Any] = chr(__lowerCamelCase )
setattr(__lowerCamelCase , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
pass
def _lowerCamelCase ( self: Any ) -> Any:
pass
def _lowerCamelCase ( self: Union[str, Any] ) -> Tuple:
pass
def _lowerCamelCase ( self: Optional[int] ) -> Any:
pass
def _lowerCamelCase ( self: List[str] ) -> str:
pass
def _lowerCamelCase ( self: Union[str, Any] ) -> Optional[int]:
pass
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
pass
def _lowerCamelCase ( self: str ) -> Tuple:
pass
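# Note: CANINE is character-level, so tokenization is simply Unicode code points
# plus private-use sentinels -- [CLS] = 0xE000 (57344) and [SEP] = 0xE001 (57345) --
# which is why those two ids bracket the expected sequence in the first test above.
# Illustratively: tokenizer("hi").input_ids == [57344, 104, 105, 57345].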
| 342 | 1 |
def _UpperCamelCase ( snake_case__, snake_case__ ) -> str:
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
__UpperCAmelCase : Tuple = str(bin(snake_case__ ) )[2:] # remove the leading "0b"
__UpperCAmelCase : int = str(bin(snake_case__ ) )[2:] # remove the leading "0b"
__UpperCAmelCase : str = max(len(snake_case__ ), len(snake_case__ ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(snake_case__ ), b_binary.zfill(snake_case__ ) ) )
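# Sanity check (illustrative): 5 is 0b101 and 3 is 0b011, so their bitwise XOR
# as a binary string is "0b110".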
if __name__ == "__main__":
import doctest
doctest.testmod()
| 342 | import logging
import os
from .state import PartialState
class _snake_case ( logging.LoggerAdapter ):
@staticmethod
def _lowerCamelCase ( __lowerCamelCase: Any ) -> int:
__UpperCAmelCase : str = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[Any] , *__lowerCamelCase: List[str] , **__lowerCamelCase: List[Any] ) -> Optional[int]:
if PartialState._shared_state == {}:
raise RuntimeError(
"You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." )
__UpperCAmelCase : Any = kwargs.pop("main_process_only" , __lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = kwargs.pop("in_order" , __lowerCamelCase )
if self.isEnabledFor(__lowerCamelCase ):
if self._should_log(__lowerCamelCase ):
__UpperCAmelCase , __UpperCAmelCase : int = self.process(__lowerCamelCase , __lowerCamelCase )
self.logger.log(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
elif in_order:
__UpperCAmelCase : Optional[int] = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self.process(__lowerCamelCase , __lowerCamelCase )
self.logger.log(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
state.wait_for_everyone()
def _UpperCamelCase ( snake_case__, snake_case__ = None ) -> List[str]:
if log_level is None:
__UpperCAmelCase : List[Any] = os.environ.get("ACCELERATE_LOG_LEVEL", snake_case__ )
__UpperCAmelCase : Union[str, Any] = logging.getLogger(snake_case__ )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(snake_case__, {} )
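# A hedged usage sketch (the factory above is Accelerate's `get_logger`); it
# assumes the Accelerate state has been initialized first, e.g. via `Accelerator()`:
#
#     from accelerate import Accelerator
#     accelerator = Accelerator()
#     logger = get_logger(__name__, log_level="INFO")
#     logger.info("printed on the main process only")  # main_process_only defaults to True
#     logger.info("printed on every process", main_process_only=False)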
| 342 | 1 |
import math
import os
import sys
def _UpperCamelCase ( snake_case__ ) -> str:
__UpperCAmelCase : Any = ""
try:
with open(snake_case__, "rb" ) as binary_file:
__UpperCAmelCase : str = binary_file.read()
for dat in data:
__UpperCAmelCase : List[Any] = f'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__ ) -> None:
lexicon.pop(snake_case__ )
__UpperCAmelCase : Dict = last_match_id
    if math.log2(snake_case__ ).is_integer():
for curr_key in lexicon:
__UpperCAmelCase : Any = "0" + lexicon[curr_key]
__UpperCAmelCase : List[Any] = bin(snake_case__ )[2:]
def _UpperCamelCase ( snake_case__ ) -> str:
__UpperCAmelCase : List[Any] = {"0": "0", "1": "1"}
__UpperCAmelCase , __UpperCAmelCase : Tuple = "", ""
__UpperCAmelCase : Optional[Any] = len(snake_case__ )
for i in range(len(snake_case__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__UpperCAmelCase : Optional[Any] = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(snake_case__, snake_case__, snake_case__, snake_case__ )
index += 1
__UpperCAmelCase : Dict = ""
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
__UpperCAmelCase : Dict = lexicon[curr_string]
result += last_match_id
return result
def _UpperCamelCase ( snake_case__, snake_case__ ) -> str:
__UpperCAmelCase : Dict = os.path.getsize(snake_case__ )
__UpperCAmelCase : Tuple = bin(snake_case__ )[2:]
__UpperCAmelCase : List[Any] = len(snake_case__ )
return "0" * (length_length - 1) + file_length_binary + compressed
def _UpperCamelCase ( snake_case__, snake_case__ ) -> None:
__UpperCAmelCase : Tuple = 8
try:
with open(snake_case__, "wb" ) as opened_file:
__UpperCAmelCase : Dict = [
to_write[i : i + byte_length]
for i in range(0, len(snake_case__ ), snake_case__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(snake_case__, 2 ).to_bytes(1, byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def _UpperCamelCase ( snake_case__, snake_case__ ) -> None:
__UpperCAmelCase : List[Any] = read_file_binary(snake_case__ )
__UpperCAmelCase : int = compress_data(snake_case__ )
__UpperCAmelCase : Dict = add_file_length(snake_case__, snake_case__ )
write_file_binary(snake_case__, snake_case__ )
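# A sketch of how a matching decompressor could peel off the header written by
# `add_file_length` above: the stream begins with (L - 1) padding zeros followed
# by the L-bit big-endian file length, whose leading bit is always "1".
# Illustrative only; the real decompressor is assumed to live in a sibling script.
def read_header_sketch(bits: str):
    zeros = bits.index("1")            # there are (L - 1) padding zeros
    field_width = zeros + 1            # the length field is L bits wide
    file_length = int(bits[zeros : zeros + field_width], 2)
    return file_length, bits[zeros + field_width :]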
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 342 | from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _snake_case ( _lowercase ):
def __init__( self: Optional[Any] , __lowerCamelCase: NestedDataStructureLike[PathLike] , __lowerCamelCase: Optional[NamedSplit] = None , __lowerCamelCase: Optional[Features] = None , __lowerCamelCase: str = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: Optional[int] = None , **__lowerCamelCase: Tuple , ) -> str:
super().__init__(
__lowerCamelCase , split=__lowerCamelCase , features=__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase , streaming=__lowerCamelCase , num_proc=__lowerCamelCase , **__lowerCamelCase , )
__UpperCAmelCase : Union[str, Any] = path_or_paths if isinstance(__lowerCamelCase , __lowerCamelCase ) else {self.split: path_or_paths}
__UpperCAmelCase : int = Text(
cache_dir=__lowerCamelCase , data_files=__lowerCamelCase , features=__lowerCamelCase , **__lowerCamelCase , )
def _lowerCamelCase ( self: List[Any] ) -> Optional[Any]:
# Build iterable dataset
if self.streaming:
__UpperCAmelCase : List[str] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__UpperCAmelCase : Any = None
__UpperCAmelCase : Any = None
__UpperCAmelCase : Dict = None
__UpperCAmelCase : str = None
self.builder.download_and_prepare(
download_config=__lowerCamelCase , download_mode=__lowerCamelCase , verification_mode=__lowerCamelCase , base_path=__lowerCamelCase , num_proc=self.num_proc , )
__UpperCAmelCase : Dict = self.builder.as_dataset(
split=self.split , verification_mode=__lowerCamelCase , in_memory=self.keep_in_memory )
return dataset
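# A hedged usage sketch -- this reader backs `load_dataset("text", ...)`; the
# de-obfuscated upstream class name `TextDatasetReader` is assumed here:
#
#     from datasets.io.text import TextDatasetReader
#     ds = TextDatasetReader("corpus.txt", split="train").read()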
| 342 | 1 |
from ..utils import DummyObject, requires_backends
class _snake_case ( metaclass=_lowercase ):
lowerCamelCase__: Any = ["torch", "transformers", "onnx"]
def __init__( self: Tuple , *__lowerCamelCase: str , **__lowerCamelCase: Dict ) -> str:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCamelCase ( cls: Optional[int] , *__lowerCamelCase: Dict , **__lowerCamelCase: List[Any] ) -> Any:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCamelCase ( cls: List[str] , *__lowerCamelCase: Any , **__lowerCamelCase: Optional[Any] ) -> Optional[int]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _snake_case ( metaclass=_lowercase ):
lowerCamelCase__: Dict = ["torch", "transformers", "onnx"]
def __init__( self: Optional[Any] , *__lowerCamelCase: Any , **__lowerCamelCase: Optional[Any] ) -> int:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCamelCase ( cls: Optional[int] , *__lowerCamelCase: Dict , **__lowerCamelCase: List[Any] ) -> str:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCamelCase ( cls: int , *__lowerCamelCase: Dict , **__lowerCamelCase: int ) -> int:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _snake_case ( metaclass=_lowercase ):
lowerCamelCase__: Dict = ["torch", "transformers", "onnx"]
def __init__( self: Any , *__lowerCamelCase: int , **__lowerCamelCase: Any ) -> Tuple:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCamelCase ( cls: Optional[Any] , *__lowerCamelCase: Dict , **__lowerCamelCase: List[Any] ) -> List[Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCamelCase ( cls: Optional[Any] , *__lowerCamelCase: List[str] , **__lowerCamelCase: List[Any] ) -> Any:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _snake_case ( metaclass=_lowercase ):
lowerCamelCase__: Dict = ["torch", "transformers", "onnx"]
def __init__( self: Union[str, Any] , *__lowerCamelCase: Any , **__lowerCamelCase: Tuple ) -> Optional[Any]:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCamelCase ( cls: Dict , *__lowerCamelCase: Dict , **__lowerCamelCase: int ) -> int:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCamelCase ( cls: List[Any] , *__lowerCamelCase: Dict , **__lowerCamelCase: int ) -> Tuple:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _snake_case ( metaclass=_lowercase ):
lowerCamelCase__: Any = ["torch", "transformers", "onnx"]
def __init__( self: Any , *__lowerCamelCase: int , **__lowerCamelCase: Any ) -> List[str]:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCamelCase ( cls: List[str] , *__lowerCamelCase: Optional[int] , **__lowerCamelCase: Dict ) -> Optional[Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCamelCase ( cls: int , *__lowerCamelCase: List[Any] , **__lowerCamelCase: str ) -> Any:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _snake_case ( metaclass=_lowercase ):
lowerCamelCase__: Optional[Any] = ["torch", "transformers", "onnx"]
def __init__( self: List[str] , *__lowerCamelCase: Tuple , **__lowerCamelCase: Optional[Any] ) -> Optional[int]:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCamelCase ( cls: List[str] , *__lowerCamelCase: Optional[Any] , **__lowerCamelCase: int ) -> Optional[Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCamelCase ( cls: Tuple , *__lowerCamelCase: Any , **__lowerCamelCase: List[Any] ) -> Dict:
requires_backends(cls , ["torch", "transformers", "onnx"] )
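# The classes above are pure import-guard boilerplate: they let top-level package
# imports succeed without torch/transformers/onnx installed and raise a helpful
# error only when a guarded object is actually used. Sketch of the mechanism
# (the DummyObject metaclass also intercepts attribute access on the class itself):
#
#     class SomeOnnxPipeline(metaclass=DummyObject):
#         _backends = ["torch", "transformers", "onnx"]
#         def __init__(self, *args, **kwargs):
#             requires_backends(self, ["torch", "transformers", "onnx"])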
| 342 | from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
'''configuration_trajectory_transformer''': [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TrajectoryTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrajectoryTransformerModel''',
'''TrajectoryTransformerPreTrainedModel''',
'''load_tf_weights_in_trajectory_transformer''',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
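# `_LazyModule` defers the heavy torch imports until an attribute is first
# accessed, so `import transformers` stays fast. Roughly equivalent to a
# module-level __getattr__ (sketch; `_submodule_for` is a hypothetical lookup
# derived from `_import_structure`):
#
#     import importlib
#     def __getattr__(name):
#         submodule = importlib.import_module("." + _submodule_for[name], __name__)
#         return getattr(submodule, name)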
| 342 | 1 |
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def _UpperCamelCase ( snake_case__ ) -> Any:
__UpperCAmelCase : Union[str, Any] = model.config
__UpperCAmelCase : int = DonutSwinConfig(
image_size=original_config.input_size, patch_size=4, depths=original_config.encoder_layer, num_heads=[4, 8, 16, 32], window_size=original_config.window_size, embed_dim=128, )
__UpperCAmelCase : List[str] = MBartConfig(
is_decoder=snake_case__, is_encoder_decoder=snake_case__, add_cross_attention=snake_case__, decoder_layers=original_config.decoder_layer, max_position_embeddings=original_config.max_position_embeddings, vocab_size=len(
model.decoder.tokenizer ), scale_embedding=snake_case__, add_final_layer_norm=snake_case__, )
return encoder_config, decoder_config
def _UpperCamelCase ( snake_case__ ) -> Any:
if "encoder.model" in name:
__UpperCAmelCase : Optional[Any] = name.replace("encoder.model", "encoder" )
if "decoder.model" in name:
__UpperCAmelCase : Optional[int] = name.replace("decoder.model", "decoder" )
if "patch_embed.proj" in name:
__UpperCAmelCase : List[str] = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
__UpperCAmelCase : Optional[Any] = name.replace("patch_embed.norm", "embeddings.norm" )
if name.startswith("encoder" ):
if "layers" in name:
__UpperCAmelCase : Tuple = "encoder." + name
if "attn.proj" in name:
__UpperCAmelCase : List[str] = name.replace("attn.proj", "attention.output.dense" )
if "attn" in name and "mask" not in name:
__UpperCAmelCase : Optional[Any] = name.replace("attn", "attention.self" )
if "norm1" in name:
__UpperCAmelCase : Dict = name.replace("norm1", "layernorm_before" )
if "norm2" in name:
__UpperCAmelCase : int = name.replace("norm2", "layernorm_after" )
if "mlp.fc1" in name:
__UpperCAmelCase : List[Any] = name.replace("mlp.fc1", "intermediate.dense" )
if "mlp.fc2" in name:
__UpperCAmelCase : List[Any] = name.replace("mlp.fc2", "output.dense" )
if name == "encoder.norm.weight":
__UpperCAmelCase : Any = "encoder.layernorm.weight"
if name == "encoder.norm.bias":
__UpperCAmelCase : Any = "encoder.layernorm.bias"
return name
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Any:
for key in orig_state_dict.copy().keys():
__UpperCAmelCase : Union[str, Any] = orig_state_dict.pop(snake_case__ )
if "qkv" in key:
__UpperCAmelCase : Union[str, Any] = key.split("." )
__UpperCAmelCase : List[Any] = int(key_split[3] )
__UpperCAmelCase : str = int(key_split[5] )
__UpperCAmelCase : List[Any] = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__UpperCAmelCase : Optional[Any] = val[:dim, :]
__UpperCAmelCase : List[Any] = val[dim : dim * 2, :]
__UpperCAmelCase : Optional[Any] = val[-dim:, :]
else:
__UpperCAmelCase : str = val[:dim]
__UpperCAmelCase : Optional[Any] = val[dim : dim * 2]
__UpperCAmelCase : Optional[int] = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
__UpperCAmelCase : int = val
return orig_state_dict
def _UpperCamelCase ( snake_case__, snake_case__=None, snake_case__=False ) -> Dict:
# load original model
__UpperCAmelCase : int = DonutModel.from_pretrained(snake_case__ ).eval()
# load HuggingFace model
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = get_configs(snake_case__ )
__UpperCAmelCase : Dict = DonutSwinModel(snake_case__ )
__UpperCAmelCase : Tuple = MBartForCausalLM(snake_case__ )
__UpperCAmelCase : str = VisionEncoderDecoderModel(encoder=snake_case__, decoder=snake_case__ )
model.eval()
__UpperCAmelCase : str = original_model.state_dict()
__UpperCAmelCase : int = convert_state_dict(snake_case__, snake_case__ )
model.load_state_dict(snake_case__ )
# verify results on scanned document
__UpperCAmelCase : List[Any] = load_dataset("hf-internal-testing/example-documents" )
__UpperCAmelCase : str = dataset["test"][0]["image"].convert("RGB" )
__UpperCAmelCase : Dict = XLMRobertaTokenizerFast.from_pretrained(snake_case__, from_slow=snake_case__ )
__UpperCAmelCase : List[str] = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1] )
__UpperCAmelCase : List[str] = DonutProcessor(snake_case__, snake_case__ )
__UpperCAmelCase : int = processor(snake_case__, return_tensors="pt" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
__UpperCAmelCase : Optional[Any] = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
__UpperCAmelCase : int = "When is the coffee break?"
__UpperCAmelCase : Union[str, Any] = task_prompt.replace("{user_input}", snake_case__ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
__UpperCAmelCase : List[Any] = "<s_rvlcdip>"
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
__UpperCAmelCase : Union[str, Any] = "<s_cord>"
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
__UpperCAmelCase : int = "s_cord-v2>"
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
__UpperCAmelCase : Dict = "<s_zhtrainticket>"
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
__UpperCAmelCase : Tuple = "hello world"
else:
raise ValueError("Model name not supported" )
__UpperCAmelCase : int = original_model.decoder.tokenizer(snake_case__, add_special_tokens=snake_case__, return_tensors="pt" )[
"input_ids"
]
__UpperCAmelCase : Any = original_model.encoder.model.patch_embed(snake_case__ )
__UpperCAmelCase , __UpperCAmelCase : int = model.encoder.embeddings(snake_case__ )
assert torch.allclose(snake_case__, snake_case__, atol=1e-3 )
# verify encoder hidden states
__UpperCAmelCase : int = original_model.encoder(snake_case__ )
__UpperCAmelCase : Union[str, Any] = model.encoder(snake_case__ ).last_hidden_state
assert torch.allclose(snake_case__, snake_case__, atol=1e-2 )
# verify decoder hidden states
__UpperCAmelCase : List[Any] = original_model(snake_case__, snake_case__, snake_case__ ).logits
__UpperCAmelCase : List[Any] = model(snake_case__, decoder_input_ids=snake_case__ ).logits
assert torch.allclose(snake_case__, snake_case__, atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case__ )
processor.save_pretrained(snake_case__ )
if push_to_hub:
model.push_to_hub("nielsr/" + model_name.split("/" )[-1], commit_message="Update model" )
processor.push_to_hub("nielsr/" + model_name.split("/" )[-1], commit_message="Update model" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''naver-clova-ix/donut-base-finetuned-docvqa''',
required=False,
type=str,
help='''Name of the original model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
required=False,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub.''',
)
_snake_case = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
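# Example invocation (the script filename is illustrative):
#
#     python convert_donut_to_pytorch.py \
#         --model_name naver-clova-ix/donut-base-finetuned-docvqa \
#         --pytorch_dump_folder_path ./donut-base-finetuned-docvqa \
#         --push_to_hub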
| 342 | import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
def __init__( self: Tuple , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any]=13 , __lowerCamelCase: Optional[int]=32 , __lowerCamelCase: List[str]=3 , __lowerCamelCase: Dict=4 , __lowerCamelCase: Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase: int=[2, 2, 3, 2] , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Tuple=37 , __lowerCamelCase: Tuple="gelu" , __lowerCamelCase: List[Any]=10 , __lowerCamelCase: Optional[int]=0.02 , __lowerCamelCase: Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase: Optional[int]=[2, 3, 4] , __lowerCamelCase: int=None , ) -> List[str]:
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : Optional[int] = image_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : Union[str, Any] = num_stages
__UpperCAmelCase : List[str] = hidden_sizes
__UpperCAmelCase : Any = depths
__UpperCAmelCase : Optional[int] = is_training
__UpperCAmelCase : List[Any] = use_labels
__UpperCAmelCase : Optional[int] = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Union[str, Any] = num_labels
__UpperCAmelCase : Any = initializer_range
__UpperCAmelCase : List[str] = out_features
__UpperCAmelCase : Tuple = out_indices
__UpperCAmelCase : List[Any] = scope
def _lowerCamelCase ( self: List[Any] ) -> Optional[int]:
__UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : List[str] = None
if self.use_labels:
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self: Tuple ) -> List[Any]:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase : Optional[Any] = ConvNextVaModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : List[str] = model(__lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: Tuple ) -> Tuple:
__UpperCAmelCase : Union[str, Any] = ConvNextVaForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Optional[int] = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self: int , __lowerCamelCase: Any , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Any = model(__lowerCamelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__UpperCAmelCase : List[Any] = None
__UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Any = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowerCamelCase ( self: int ) -> List[str]:
__UpperCAmelCase : int = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs
__UpperCAmelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs
__UpperCAmelCase : Dict = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class _snake_case ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase__: Dict = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__: str = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__: Tuple = False
lowerCamelCase__: int = False
lowerCamelCase__: Dict = False
lowerCamelCase__: int = False
lowerCamelCase__: Any = False
def _lowerCamelCase ( self: Union[str, Any] ) -> Union[str, Any]:
__UpperCAmelCase : Union[str, Any] = ConvNextVaModelTester(self )
__UpperCAmelCase : str = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def _lowerCamelCase ( self: Dict ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self: List[Any] ) -> int:
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def _lowerCamelCase ( self: Any ) -> Any:
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def _lowerCamelCase ( self: str ) -> Optional[Any]:
pass
def _lowerCamelCase ( self: List[Any] ) -> int:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_with_labels()
__UpperCAmelCase : Optional[Any] = True
if model_class.__name__ in [
*get_values(__lowerCamelCase ),
*get_values(__lowerCamelCase ),
]:
continue
__UpperCAmelCase : Optional[Any] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
__UpperCAmelCase : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
__UpperCAmelCase : Any = model(**__lowerCamelCase ).loss
loss.backward()
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = True
if (
model_class.__name__
in [*get_values(__lowerCamelCase ), *get_values(__lowerCamelCase )]
or not model_class.supports_gradient_checkpointing
):
continue
__UpperCAmelCase : int = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.gradient_checkpointing_enable()
model.train()
__UpperCAmelCase : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
__UpperCAmelCase : Any = model(**__lowerCamelCase ).loss
loss.backward()
def _lowerCamelCase ( self: List[str] ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : str = model_class(__lowerCamelCase )
__UpperCAmelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : List[Any] = [*signature.parameters.keys()]
__UpperCAmelCase : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _lowerCamelCase ( self: str ) -> List[Any]:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowerCamelCase ( self: Union[str, Any] ) -> Dict:
def check_hidden_states_output(__lowerCamelCase: Any , __lowerCamelCase: Tuple , __lowerCamelCase: str ):
__UpperCAmelCase : Any = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Tuple = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
__UpperCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[int] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Any = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def _lowerCamelCase ( self: Dict ) -> List[Any]:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Optional[int] = ConvNextVaModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def _UpperCamelCase ( ) -> List[Any]:
__UpperCAmelCase : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
__UpperCAmelCase : List[Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCamelCase )
__UpperCAmelCase : List[str] = self.default_image_processor
__UpperCAmelCase : Optional[Any] = prepare_img()
__UpperCAmelCase : int = preprocessor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
__UpperCAmelCase : str = model(**__lowerCamelCase )
# verify the logits
__UpperCAmelCase : Dict = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
__UpperCAmelCase : str = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
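        # The assertions above only pin the logits shape and the first three values;
        # a fuller smoke test might also check the predicted ImageNet class
        # (sketch, not asserted):
        #
        #     predicted_label = outputs.logits.argmax(-1).item()
        #     print(model.config.id2label[predicted_label])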
| 342 | 1 |
def _UpperCamelCase ( snake_case__ ) -> int:
assert isinstance(snake_case__, snake_case__ ), f'''The input value of [n={number}] is not an integer'''
if number == 1:
return 2
elif number < 1:
__UpperCAmelCase : Any = f'''The input value of [n={number}] has to be > 0'''
raise ValueError(snake_case__ )
else:
__UpperCAmelCase : int = sylvester(number - 1 )
__UpperCAmelCase : Tuple = num - 1
__UpperCAmelCase : List[str] = num
return lower * upper + 1
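# The recurrence above is a(n) = a(n-1) * (a(n-1) - 1) + 1 with a(1) = 2,
# producing Sylvester's sequence 2, 3, 7, 43, 1807, ... (e.g. sylvester(4) == 43).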
if __name__ == "__main__":
print(F'The 8th number in Sylvester\'s sequence: {sylvester(8)}')
| 342 | import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _snake_case ( _lowercase ):
lowerCamelCase__: str = "detr"
lowerCamelCase__: Dict = ["past_key_values"]
lowerCamelCase__: str = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self: List[str] , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Any=None , __lowerCamelCase: Dict=3 , __lowerCamelCase: str=1_00 , __lowerCamelCase: Union[str, Any]=6 , __lowerCamelCase: Union[str, Any]=20_48 , __lowerCamelCase: Dict=8 , __lowerCamelCase: Optional[int]=6 , __lowerCamelCase: List[Any]=20_48 , __lowerCamelCase: int=8 , __lowerCamelCase: Tuple=0.0 , __lowerCamelCase: Dict=0.0 , __lowerCamelCase: Any=True , __lowerCamelCase: Tuple="relu" , __lowerCamelCase: Tuple=2_56 , __lowerCamelCase: Dict=0.1 , __lowerCamelCase: Union[str, Any]=0.0 , __lowerCamelCase: Optional[int]=0.0 , __lowerCamelCase: Union[str, Any]=0.02 , __lowerCamelCase: str=1.0 , __lowerCamelCase: List[str]=False , __lowerCamelCase: Dict="sine" , __lowerCamelCase: Optional[int]="resnet50" , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: int=False , __lowerCamelCase: Union[str, Any]=1 , __lowerCamelCase: Tuple=5 , __lowerCamelCase: int=2 , __lowerCamelCase: Dict=1 , __lowerCamelCase: Dict=1 , __lowerCamelCase: Union[str, Any]=5 , __lowerCamelCase: Dict=2 , __lowerCamelCase: int=0.1 , **__lowerCamelCase: str , ) -> int:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__UpperCAmelCase : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : List[Any] = backbone_config.get("model_type" )
__UpperCAmelCase : List[str] = CONFIG_MAPPING[backbone_model_type]
__UpperCAmelCase : List[str] = config_class.from_dict(__lowerCamelCase )
# set timm attributes to None
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = None, None, None
__UpperCAmelCase : Any = use_timm_backbone
__UpperCAmelCase : Optional[Any] = backbone_config
__UpperCAmelCase : Optional[Any] = num_channels
__UpperCAmelCase : List[Any] = num_queries
__UpperCAmelCase : Optional[int] = d_model
__UpperCAmelCase : Optional[Any] = encoder_ffn_dim
__UpperCAmelCase : Dict = encoder_layers
__UpperCAmelCase : List[Any] = encoder_attention_heads
__UpperCAmelCase : int = decoder_ffn_dim
__UpperCAmelCase : Tuple = decoder_layers
__UpperCAmelCase : int = decoder_attention_heads
__UpperCAmelCase : List[Any] = dropout
__UpperCAmelCase : Dict = attention_dropout
__UpperCAmelCase : Optional[Any] = activation_dropout
__UpperCAmelCase : int = activation_function
__UpperCAmelCase : Any = init_std
__UpperCAmelCase : str = init_xavier_std
__UpperCAmelCase : int = encoder_layerdrop
__UpperCAmelCase : Tuple = decoder_layerdrop
__UpperCAmelCase : List[Any] = encoder_layers
__UpperCAmelCase : Optional[Any] = auxiliary_loss
__UpperCAmelCase : int = position_embedding_type
__UpperCAmelCase : Optional[int] = backbone
__UpperCAmelCase : str = use_pretrained_backbone
__UpperCAmelCase : Dict = dilation
# Hungarian matcher
__UpperCAmelCase : Optional[int] = class_cost
__UpperCAmelCase : Optional[Any] = bbox_cost
__UpperCAmelCase : Optional[int] = giou_cost
# Loss coefficients
__UpperCAmelCase : Any = mask_loss_coefficient
__UpperCAmelCase : Any = dice_loss_coefficient
__UpperCAmelCase : Any = bbox_loss_coefficient
__UpperCAmelCase : Optional[int] = giou_loss_coefficient
__UpperCAmelCase : Optional[Any] = eos_coefficient
super().__init__(is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase )
@property
def _lowerCamelCase ( self: Dict ) -> int:
return self.encoder_attention_heads
@property
def _lowerCamelCase ( self: str ) -> int:
return self.d_model
@classmethod
def _lowerCamelCase ( cls: Optional[int] , __lowerCamelCase: PretrainedConfig , **__lowerCamelCase: List[Any] ) -> List[Any]:
return cls(backbone_config=__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( self: str ) -> Dict[str, any]:
__UpperCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
__UpperCAmelCase : int = self.backbone_config.to_dict()
__UpperCAmelCase : List[str] = self.__class__.model_type
return output
class _snake_case ( _lowercase ):
lowerCamelCase__: Optional[int] = version.parse("1.11" )
@property
def _lowerCamelCase ( self: Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _lowerCamelCase ( self: Optional[Any] ) -> float:
return 1e-5
@property
def _lowerCamelCase ( self: List[str] ) -> int:
return 12
| 342 | 1 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _snake_case ( _lowercase ):
def __init__( self: int , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: str=13 , __lowerCamelCase: Optional[Any]=7 , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: int=True , __lowerCamelCase: str=True , __lowerCamelCase: List[str]=False , __lowerCamelCase: Any=False , __lowerCamelCase: Union[str, Any]=False , __lowerCamelCase: Union[str, Any]=2 , __lowerCamelCase: List[Any]=99 , __lowerCamelCase: Optional[Any]=0 , __lowerCamelCase: int=32 , __lowerCamelCase: Any=5 , __lowerCamelCase: List[str]=4 , __lowerCamelCase: Optional[int]=0.1 , __lowerCamelCase: List[Any]=0.1 , __lowerCamelCase: Union[str, Any]=5_12 , __lowerCamelCase: int=12 , __lowerCamelCase: Any=2 , __lowerCamelCase: Union[str, Any]=0.02 , __lowerCamelCase: Any=3 , __lowerCamelCase: Any=4 , __lowerCamelCase: List[Any]="last" , __lowerCamelCase: Any=None , __lowerCamelCase: List[str]=None , ) -> Dict:
__UpperCAmelCase : Tuple = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : Any = seq_length
__UpperCAmelCase : List[Any] = is_training
__UpperCAmelCase : int = use_input_lengths
__UpperCAmelCase : List[str] = use_token_type_ids
__UpperCAmelCase : Optional[Any] = use_labels
__UpperCAmelCase : int = gelu_activation
__UpperCAmelCase : List[str] = sinusoidal_embeddings
__UpperCAmelCase : Optional[Any] = causal
__UpperCAmelCase : str = asm
__UpperCAmelCase : Optional[int] = n_langs
__UpperCAmelCase : List[Any] = vocab_size
__UpperCAmelCase : int = n_special
__UpperCAmelCase : Tuple = hidden_size
__UpperCAmelCase : Union[str, Any] = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : int = hidden_dropout_prob
__UpperCAmelCase : str = attention_probs_dropout_prob
__UpperCAmelCase : Any = max_position_embeddings
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : List[Any] = type_sequence_label_size
__UpperCAmelCase : Tuple = initializer_range
__UpperCAmelCase : Optional[Any] = num_labels
__UpperCAmelCase : Optional[int] = num_choices
__UpperCAmelCase : Dict = summary_type
__UpperCAmelCase : Optional[int] = use_proj
__UpperCAmelCase : List[str] = scope
def _lowerCamelCase ( self: Optional[int] ) -> List[str]:
__UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : Tuple = None
if self.use_input_lengths:
__UpperCAmelCase : List[Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCAmelCase : Dict = None
if self.use_token_type_ids:
__UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCAmelCase : str = None
__UpperCAmelCase : int = None
__UpperCAmelCase : Optional[Any] = None
if self.use_labels:
__UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , 2 ).float()
__UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase : Optional[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCamelCase ( self: Tuple ) -> Optional[Any]:
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Any , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Dict , __lowerCamelCase: Dict , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Any , __lowerCamelCase: int , __lowerCamelCase: Optional[int] , ) -> str:
__UpperCAmelCase : Dict = FlaubertModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : str = model(__lowerCamelCase , lengths=__lowerCamelCase , langs=__lowerCamelCase )
__UpperCAmelCase : List[str] = model(__lowerCamelCase , langs=__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[Any] , __lowerCamelCase: Tuple , __lowerCamelCase: Optional[int] , __lowerCamelCase: Tuple , __lowerCamelCase: Dict , __lowerCamelCase: List[str] , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Tuple , ) -> List[str]:
__UpperCAmelCase : Optional[Any] = FlaubertWithLMHeadModel(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : List[Any] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self: int , __lowerCamelCase: Tuple , __lowerCamelCase: Any , __lowerCamelCase: Optional[int] , __lowerCamelCase: Tuple , __lowerCamelCase: List[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Tuple , ) -> Union[str, Any]:
__UpperCAmelCase : Tuple = FlaubertForQuestionAnsweringSimple(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Any = model(__lowerCamelCase )
__UpperCAmelCase : List[Any] = model(__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self: Dict , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str] , __lowerCamelCase: str , __lowerCamelCase: List[str] , __lowerCamelCase: Any , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: List[str] , ) -> Optional[int]:
__UpperCAmelCase : Tuple = FlaubertForQuestionAnswering(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Dict = model(__lowerCamelCase )
__UpperCAmelCase : List[str] = model(
__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , cls_index=__lowerCamelCase , is_impossible=__lowerCamelCase , p_mask=__lowerCamelCase , )
__UpperCAmelCase : Tuple = model(
__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , cls_index=__lowerCamelCase , is_impossible=__lowerCamelCase , )
((__UpperCAmelCase) , ) : int = result_with_labels.to_tuple()
__UpperCAmelCase : List[str] = model(__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase )
((__UpperCAmelCase) , ) : List[Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _lowerCamelCase ( self: int , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict , __lowerCamelCase: str , __lowerCamelCase: Dict , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Dict , __lowerCamelCase: List[Any] , ) -> List[Any]:
__UpperCAmelCase : int = FlaubertForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Tuple = model(__lowerCamelCase )
__UpperCAmelCase : int = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: List[str] , __lowerCamelCase: List[Any] , __lowerCamelCase: List[str] , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Dict , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: Union[str, Any] , ) -> Any:
__UpperCAmelCase : str = self.num_labels
__UpperCAmelCase : List[str] = FlaubertForTokenClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Union[str, Any] = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self: str , __lowerCamelCase: List[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Tuple , __lowerCamelCase: Dict , __lowerCamelCase: int , __lowerCamelCase: Any , __lowerCamelCase: int , __lowerCamelCase: str , __lowerCamelCase: int , ) -> Union[str, Any]:
__UpperCAmelCase : Union[str, Any] = self.num_choices
__UpperCAmelCase : Tuple = FlaubertForMultipleChoice(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : Dict = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self: List[Any] ) -> List[str]:
__UpperCAmelCase : int = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
__UpperCAmelCase : Union[str, Any] = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class _snake_case ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase__: Tuple = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCamelCase__: List[str] = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Dict , __lowerCamelCase: int , __lowerCamelCase: List[Any] , __lowerCamelCase: Union[str, Any] ) -> Dict:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
            # `QAPipelineTests` fails for a few models when the slow tokenizers are used.
            # (The slow tokenizers were never used for pipeline tests before the pipeline testing rework.)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCamelCase ( self: int , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: List[str] , __lowerCamelCase: str=False ) -> int:
__UpperCAmelCase : Dict = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
__UpperCAmelCase : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase )
__UpperCAmelCase : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase )
return inputs_dict
def _lowerCamelCase ( self: Optional[Any] ) -> List[Any]:
__UpperCAmelCase : str = FlaubertModelTester(self )
__UpperCAmelCase : str = ConfigTester(self , config_class=__lowerCamelCase , emb_dim=37 )
def _lowerCamelCase ( self: int ) -> int:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self: List[str] ) -> Union[str, Any]:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__lowerCamelCase )
def _lowerCamelCase ( self: int ) -> Tuple:
__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__lowerCamelCase )
def _lowerCamelCase ( self: Any ) -> List[Any]:
__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*__lowerCamelCase )
def _lowerCamelCase ( self: List[Any] ) -> Any:
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__lowerCamelCase )
def _lowerCamelCase ( self: Tuple ) -> int:
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__lowerCamelCase )
def _lowerCamelCase ( self: Tuple ) -> Optional[int]:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*__lowerCamelCase )
def _lowerCamelCase ( self: Optional[Any] ) -> Union[str, Any]:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*__lowerCamelCase )
@slow
def _lowerCamelCase ( self: Optional[int] ) -> Optional[int]:
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : List[str] = FlaubertModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@slow
@require_torch_gpu
def _lowerCamelCase ( self: Tuple ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
__UpperCAmelCase : int = True
__UpperCAmelCase : Optional[int] = model_class(config=__lowerCamelCase )
__UpperCAmelCase : int = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : List[Any] = torch.jit.trace(
__lowerCamelCase , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__lowerCamelCase , os.path.join(__lowerCamelCase , "traced_model.pt" ) )
__UpperCAmelCase : Any = torch.jit.load(os.path.join(__lowerCamelCase , "traced_model.pt" ) , map_location=__lowerCamelCase )
loaded(inputs_dict["input_ids"].to(__lowerCamelCase ) , inputs_dict["attention_mask"].to(__lowerCamelCase ) )
@require_torch
class _snake_case ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self: Any ) -> str:
__UpperCAmelCase : str = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" )
__UpperCAmelCase : Dict = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(__lowerCamelCase )[0]
__UpperCAmelCase : int = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , __lowerCamelCase )
__UpperCAmelCase : int = torch.tensor(
[[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCamelCase , atol=1e-4 ) )
| 342 | from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
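# A minimal illustration of the helper above (shapes are assumptions for the
# example, not taken from this file): rows of both inputs are L2-normalized,
# so the matmul yields pairwise cosine similarities.
#
#   a = jnp.ones((2, 4))               # 2 embeddings of dimension 4
#   b = jnp.ones((3, 4))               # 3 embeddings of dimension 4
#   sims = jax_cosine_distance(a, b)   # shape (2, 3), every entry ~1.0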
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(
        self,
        clip_input,
        params: dict = None,
    ):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params}, jnp.array(clip_input, dtype=jnp.float32), rngs={},
        )
| 342 | 1 |
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f'''{result:.9f}'''
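# Derivation (added as a comment for clarity): by linearity of expectation, the
# expected number of distinct colours is NUM_COLOURS * P(a given colour appears),
# and P(a colour is entirely missing) = C(60, 20) / C(70, 20), so
#   result = 7 * (1 - C(60, 20) / C(70, 20)) ~= 6.818741802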
if __name__ == "__main__":
print(solution(20))
| 342 | import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"])
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]])
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]])
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]])
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]])
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]])
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'''Saving processor to {pytorch_dump_folder_path}''')
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[F'upernet-convnext-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_snake_case = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 342 | 1 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _UpperCamelCase ( snake_case__ ) -> List[Tuple[int, ...]]:
__UpperCAmelCase : int = []
if isinstance(snake_case__, snake_case__ ):
for v in tree.values():
shapes.extend(_fetch_dims(snake_case__ ) )
elif isinstance(snake_case__, (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(snake_case__ ) )
elif isinstance(snake_case__, torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("Not supported" )
return shapes
@torch.jit.ignore
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Tuple[int, ...]:
__UpperCAmelCase : List[str] = []
for d in reversed(snake_case__ ):
idx.append(flat_idx % d )
__UpperCAmelCase : Optional[Any] = flat_idx // d
return tuple(reversed(snake_case__ ) )
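# Worked example for the helper above: with dims = (2, 3) and flat_idx = 4, the
# loop takes 4 % 3 = 1 for the last dimension, then 4 // 3 = 1 and 1 % 2 = 1 for
# the first, yielding the multi-index (1, 1) -- the row-major inverse of
# 1 * 3 + 1 = 4.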
@torch.jit.ignore
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__ = None, snake_case__ = None, ) -> List[Tuple[slice, ...]]:
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(snake_case__ ) -> None:
__UpperCAmelCase : List[Any] = True
for i in range(len(snake_case__ ) ):
__UpperCAmelCase : int = -1 * (i + 1)
l[reversed_idx] &= tally
__UpperCAmelCase : Tuple = l[reversed_idx]
if start_edges is None:
__UpperCAmelCase : Optional[Any] = [s == 0 for s in start]
reduce_edge_list(snake_case__ )
if end_edges is None:
__UpperCAmelCase : Union[str, Any] = [e == (d - 1) for e, d in zip(snake_case__, snake_case__ )]
reduce_edge_list(snake_case__ )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(snake_case__ ) == 0:
return [()]
elif len(snake_case__ ) == 1:
return [(slice(start[0], end[0] + 1 ),)]
__UpperCAmelCase : List[Tuple[slice, ...]] = []
__UpperCAmelCase : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(snake_case__, snake_case__ ):
if s == e:
path_list.append(slice(snake_case__, s + 1 ) )
else:
break
__UpperCAmelCase : Tuple[slice, ...] = tuple(snake_case__ )
__UpperCAmelCase : int = len(snake_case__ )
# start == end, and we're done
if divergence_idx == len(snake_case__ ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
__UpperCAmelCase : Optional[Any] = start[divergence_idx]
return tuple(
path + (slice(snake_case__, sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :], [d - 1 for d in dims[divergence_idx + 1 :]], dims[divergence_idx + 1 :], start_edges=start_edges[divergence_idx + 1 :], end_edges=[True for _ in end_edges[divergence_idx + 1 :]], ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
__UpperCAmelCase : Tuple = end[divergence_idx]
return tuple(
path + (slice(snake_case__, edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]], end[divergence_idx + 1 :], dims[divergence_idx + 1 :], start_edges=[True for _ in start_edges[divergence_idx + 1 :]], end_edges=end_edges[divergence_idx + 1 :], ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx], end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
__UpperCAmelCase : str = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] ),) )
slices.extend(lower() )
return slices
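# Illustrative example (hand-traced, so treat as a sketch): for batch dims
# (3, 4) with start = (0, 2) and end = (2, 1), no single rectangular slice
# covers the flat range, so it decomposes into three pieces --
# (slice(0, 1), slice(2, 4)) for the ragged top row, (slice(1, 2),) for the
# full middle row, and (slice(2, 3), slice(0, 2)) for the ragged bottom row.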
@torch.jit.ignore
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__ ) -> torch.Tensor:
__UpperCAmelCase : List[str] = t.shape[:no_batch_dims]
__UpperCAmelCase : List[Any] = list(_flat_idx_to_idx(snake_case__, snake_case__ ) )
# _get_minimal_slice_set is inclusive
__UpperCAmelCase : List[str] = list(_flat_idx_to_idx(flat_end - 1, snake_case__ ) )
# Get an ordered list of slices to perform
__UpperCAmelCase : Dict = _get_minimal_slice_set(
snake_case__, snake_case__, snake_case__, )
__UpperCAmelCase : Optional[int] = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
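# Mental model for the function above (a reference equivalence, not the actual
# implementation): it returns the same values as
#   t.reshape((-1,) + t.shape[no_batch_dims:])[flat_start:flat_end]
# but builds the result from a minimal set of slices, avoiding the full copy
# that `reshape` can force on expanded (non-contiguous) tensors.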
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ = False, snake_case__ = None, snake_case__ = False, ) -> Any:
if not (len(snake_case__ ) > 0):
raise ValueError("Must provide at least one input" )
__UpperCAmelCase : Any = [shape[:no_batch_dims] for shape in _fetch_dims(snake_case__ )]
__UpperCAmelCase : Optional[Any] = tuple([max(snake_case__ ) for s in zip(*snake_case__ )] )
def _prep_inputs(snake_case__ ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
__UpperCAmelCase : List[str] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
__UpperCAmelCase : Dict = t.reshape(-1, *t.shape[no_batch_dims:] )
else:
__UpperCAmelCase : Union[str, Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
__UpperCAmelCase : Dict[str, Any] = tensor_tree_map(_prep_inputs, snake_case__ )
__UpperCAmelCase : int = None
if _out is not None:
__UpperCAmelCase : List[Any] = tensor_tree_map(lambda snake_case__ : t.view([-1] + list(t.shape[no_batch_dims:] ) ), _out )
__UpperCAmelCase : List[Any] = 1
for d in orig_batch_dims:
flat_batch_dim *= d
__UpperCAmelCase : Union[str, Any] = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(snake_case__ ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
__UpperCAmelCase : str = 0
__UpperCAmelCase : List[Any] = prepped_outputs
for _ in range(snake_case__ ):
# Chunk the input
if not low_mem:
__UpperCAmelCase : Union[str, Any] = _select_chunk
else:
__UpperCAmelCase : str = partial(
_chunk_slice, flat_start=snake_case__, flat_end=min(snake_case__, i + chunk_size ), no_batch_dims=len(snake_case__ ), )
__UpperCAmelCase : Dict[str, Any] = tensor_tree_map(snake_case__, snake_case__ )
# Run the layer on the chunk
__UpperCAmelCase : Any = layer(**snake_case__ )
# Allocate space for the output
if out is None:
__UpperCAmelCase : Dict = tensor_tree_map(lambda snake_case__ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ), snake_case__ )
# Put the chunk in its pre-allocated space
if isinstance(snake_case__, snake_case__ ):
def assign(snake_case__, snake_case__ ) -> None:
for k, v in da.items():
if isinstance(snake_case__, snake_case__ ):
assign(snake_case__, da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
__UpperCAmelCase : str = da[k]
assign(snake_case__, snake_case__ )
elif isinstance(snake_case__, snake_case__ ):
for xa, xa in zip(snake_case__, snake_case__ ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
__UpperCAmelCase : Tuple = xa
elif isinstance(snake_case__, torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
__UpperCAmelCase : str = output_chunk
else:
raise ValueError("Not supported" )
i += chunk_size
__UpperCAmelCase : int = tensor_tree_map(lambda snake_case__ : t.view(orig_batch_dims + t.shape[1:] ), snake_case__ )
return out
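# A minimal usage sketch of the chunked-apply function above (names and shapes
# here are illustrative assumptions, and `chunk_layer` stands for the obfuscated
# `_UpperCamelCase` defined above):
#
#   layer = lambda x: {"y": x * 2}
#   inputs = {"x": torch.randn(16, 8)}
#   out = chunk_layer(layer, inputs, chunk_size=4, no_batch_dims=1)
#   # out["y"] matches layer(**inputs)["y"] exactly, computed 4 rows at a time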
class _snake_case :
def __init__( self: Any , __lowerCamelCase: int = 5_12 , ) -> Any:
__UpperCAmelCase : Union[str, Any] = max_chunk_size
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : Optional[tuple] = None
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Callable , __lowerCamelCase: tuple , __lowerCamelCase: int ) -> int:
logging.info("Tuning chunk size..." )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
__UpperCAmelCase : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
__UpperCAmelCase : int = [c for c in candidates if c > min_chunk_size]
__UpperCAmelCase : Any = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(__lowerCamelCase: int ) -> bool:
try:
with torch.no_grad():
fn(*__lowerCamelCase , chunk_size=__lowerCamelCase )
return True
except RuntimeError:
return False
__UpperCAmelCase : Union[str, Any] = 0
__UpperCAmelCase : List[str] = len(__lowerCamelCase ) - 1
while i > min_viable_chunk_size_index:
__UpperCAmelCase : Optional[int] = test_chunk_size(candidates[i] )
if not viable:
__UpperCAmelCase : List[str] = (min_viable_chunk_size_index + i) // 2
else:
__UpperCAmelCase : str = i
__UpperCAmelCase : Dict = (i + len(__lowerCamelCase ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Iterable , __lowerCamelCase: Iterable ) -> bool:
__UpperCAmelCase : Dict = True
for aa, aa in zip(__lowerCamelCase , __lowerCamelCase ):
assert type(__lowerCamelCase ) == type(__lowerCamelCase )
if isinstance(__lowerCamelCase , (list, tuple) ):
consistent &= self._compare_arg_caches(__lowerCamelCase , __lowerCamelCase )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda __lowerCamelCase : x[0] )]
__UpperCAmelCase : Any = [v for _, v in sorted(aa.items() , key=lambda __lowerCamelCase : x[0] )]
consistent &= self._compare_arg_caches(__lowerCamelCase , __lowerCamelCase )
else:
consistent &= aa == aa
return consistent
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Callable , __lowerCamelCase: tuple , __lowerCamelCase: int , ) -> int:
__UpperCAmelCase : List[Any] = True
__UpperCAmelCase : tuple = tree_map(lambda __lowerCamelCase : a.shape if isinstance(__lowerCamelCase , torch.Tensor ) else a , __lowerCamelCase , __lowerCamelCase )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self._compare_arg_caches(self.cached_arg_data , __lowerCamelCase )
else:
# Otherwise, we can reuse the precomputed value
__UpperCAmelCase : List[Any] = False
if not consistent:
__UpperCAmelCase : int = self._determine_favorable_chunk_size(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , )
__UpperCAmelCase : Optional[int] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 342 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class _snake_case ( _lowercase ):
lowerCamelCase__: Dict = "roc_bert"
def __init__( self: int , __lowerCamelCase: Union[str, Any]=3_05_22 , __lowerCamelCase: int=7_68 , __lowerCamelCase: Any=12 , __lowerCamelCase: int=12 , __lowerCamelCase: Union[str, Any]=30_72 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: Optional[int]=0.1 , __lowerCamelCase: str=0.1 , __lowerCamelCase: Any=5_12 , __lowerCamelCase: Union[str, Any]=2 , __lowerCamelCase: str=0.02 , __lowerCamelCase: int=1e-12 , __lowerCamelCase: str=True , __lowerCamelCase: int=0 , __lowerCamelCase: List[str]="absolute" , __lowerCamelCase: List[Any]=None , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: Dict=7_68 , __lowerCamelCase: Optional[int]=9_10 , __lowerCamelCase: Union[str, Any]=5_12 , __lowerCamelCase: int=2_48_58 , __lowerCamelCase: Optional[int]=True , **__lowerCamelCase: Any , ) -> List[Any]:
__UpperCAmelCase : str = vocab_size
__UpperCAmelCase : Dict = max_position_embeddings
__UpperCAmelCase : Optional[Any] = hidden_size
__UpperCAmelCase : Optional[int] = num_hidden_layers
__UpperCAmelCase : Union[str, Any] = num_attention_heads
__UpperCAmelCase : List[str] = intermediate_size
__UpperCAmelCase : List[Any] = hidden_act
__UpperCAmelCase : List[str] = hidden_dropout_prob
__UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : Optional[Any] = type_vocab_size
__UpperCAmelCase : List[Any] = layer_norm_eps
__UpperCAmelCase : Optional[int] = use_cache
__UpperCAmelCase : Optional[Any] = enable_pronunciation
__UpperCAmelCase : Any = enable_shape
__UpperCAmelCase : Union[str, Any] = pronunciation_embed_dim
__UpperCAmelCase : Optional[Any] = pronunciation_vocab_size
__UpperCAmelCase : Optional[Any] = shape_embed_dim
__UpperCAmelCase : List[Any] = shape_vocab_size
__UpperCAmelCase : int = concat_input
__UpperCAmelCase : int = position_embedding_type
__UpperCAmelCase : Optional[int] = classifier_dropout
super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase )
| 342 | 1 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
_snake_case = logging.get_logger(__name__)
class Conversation:
    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '''
                    f'''with: "{text}".''')
                self.new_user_input = text
            else:
                logger.warning(
                    f'''User input added while unprocessed input was existing: "{self.new_user_input}" new input '''
                    f'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''')
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self) -> str:
        output = f'''Conversation id: {self.uuid} \n'''
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f'''{name} >> {text} \n'''
        return output
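# A short usage sketch of the class above:
#
#   conversation = Conversation("Is it going to rain today?")
#   conversation.mark_processed()                  # move the input into history
#   conversation.append_response("Probably not.")
#   print(conversation)                            # user >> ... / bot >> ...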
@add_end_docstrings(
_lowercase , R"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n " , )
class _snake_case ( _lowercase ):
def __init__( self: str , *__lowerCamelCase: Union[str, Any] , **__lowerCamelCase: Dict ) -> Optional[int]:
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
if self.tokenizer.pad_token_id is None:
__UpperCAmelCase : Any = self.tokenizer.eos_token
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: List[str]=None , __lowerCamelCase: Optional[int]=None , __lowerCamelCase: Tuple=None , **__lowerCamelCase: int ) -> int:
__UpperCAmelCase : int = {}
__UpperCAmelCase : Union[str, Any] = {}
__UpperCAmelCase : Union[str, Any] = {}
if min_length_for_response is not None:
__UpperCAmelCase : Dict = min_length_for_response
if minimum_tokens is not None:
__UpperCAmelCase : str = minimum_tokens
if "max_length" in generate_kwargs:
__UpperCAmelCase : Any = generate_kwargs["max_length"]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__UpperCAmelCase : Dict = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(__lowerCamelCase )
return preprocess_params, forward_params, postprocess_params
def __call__( self: Dict , __lowerCamelCase: Union[Conversation, List[Conversation]] , __lowerCamelCase: List[str]=0 , **__lowerCamelCase: str ) -> Optional[Any]:
__UpperCAmelCase : Tuple = super().__call__(__lowerCamelCase , num_workers=__lowerCamelCase , **__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ) and len(__lowerCamelCase ) == 1:
return outputs[0]
return outputs
def _lowerCamelCase ( self: Dict , __lowerCamelCase: Conversation , __lowerCamelCase: str=32 ) -> Dict[str, Any]:
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
raise ValueError("ConversationalPipeline, expects Conversation as inputs" )
if conversation.new_user_input is None:
raise ValueError(
f'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '''
"Add user inputs with the conversation's `add_user_input` method" )
if hasattr(self.tokenizer , "_build_conversation_input_ids" ):
__UpperCAmelCase : Optional[int] = self.tokenizer._build_conversation_input_ids(__lowerCamelCase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__UpperCAmelCase : Dict = self._legacy_parse_and_tokenize(__lowerCamelCase )
if self.framework == "pt":
__UpperCAmelCase : List[Any] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__UpperCAmelCase : List[Any] = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def _lowerCamelCase ( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[Any]=10 , **__lowerCamelCase: str ) -> int:
__UpperCAmelCase : str = generate_kwargs.get("max_length" , self.model.config.max_length )
__UpperCAmelCase : Any = model_inputs["input_ids"].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(f'''Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})''' )
__UpperCAmelCase : List[Any] = max_length - minimum_tokens
__UpperCAmelCase : Union[str, Any] = model_inputs["input_ids"][:, -trim:]
if "attention_mask" in model_inputs:
__UpperCAmelCase : Any = model_inputs["attention_mask"][:, -trim:]
__UpperCAmelCase : Dict = model_inputs.pop("conversation" )
__UpperCAmelCase : Tuple = max_length
__UpperCAmelCase : Tuple = self.model.generate(**__lowerCamelCase , **__lowerCamelCase )
if self.model.config.is_encoder_decoder:
__UpperCAmelCase : Optional[int] = 1
else:
__UpperCAmelCase : Any = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[Any] , __lowerCamelCase: Dict=True ) -> List[Any]:
__UpperCAmelCase : List[str] = model_outputs["output_ids"]
__UpperCAmelCase : Tuple = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase , )
__UpperCAmelCase : List[Any] = model_outputs["conversation"]
conversation.mark_processed()
conversation.append_response(__lowerCamelCase )
return conversation
def _lowerCamelCase ( self: Dict , __lowerCamelCase: Conversation ) -> Dict:
__UpperCAmelCase : int = self.tokenizer.eos_token_id
__UpperCAmelCase : int = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
if len(__lowerCamelCase ) > self.tokenizer.model_max_length:
__UpperCAmelCase : Union[str, Any] = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 342 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
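# For example, get_mobilevit_config("deeplabv3_mobilevit_s") yields a config
# with 21 PASCAL VOC labels and a 512x512 image size, while plain "mobilevit_s"
# keeps the 1000 ImageNet labels.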
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")
    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")
    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")
    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(checkpoint, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_snake_case = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
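# Example invocation (a sketch; the script filename below is an assumption, not
# taken from this file):
#
#   python convert_mobilevit_original_to_pytorch.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small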
| 342 | 1 |
import numpy as np
import datasets
_DESCRIPTION = '''
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
_CITATION = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
_KWARGS_DESCRIPTION = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def _lowerCamelCase ( self: List[str] ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: Union[str, Any] ) -> List[str]:
# convert to numpy arrays
__UpperCAmelCase : int = np.array(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = np.array(__lowerCamelCase )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("Expected `X` to be a 2D vector" )
if len(reference_distribution.shape ) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
# Get mahalanobis distance for each prediction
X_minus_mu = X - np.mean(reference_distribution)
cov = np.cov(reference_distribution.T)
try:
    inv_covmat = np.linalg.inv(cov)
except np.linalg.LinAlgError:
    inv_covmat = np.linalg.pinv(cov)
left_term = np.dot(X_minus_mu, inv_covmat)
mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
return {"mahalanobis": mahal_dist}
| 342 | import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """Expected number of distinct colours among the picked balls (Project Euler 493)."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
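    # Why the formula works (sketch): by linearity of expectation, the expected
    # number of distinct colours is NUM_COLOURS * P(a given colour appears at
    # least once), and P(a colour is missing) = C(60, 20) / C(70, 20).
    # Cross-check with the closed form spelled out:
    print(7 * (1 - math.comb(60, 20) / math.comb(70, 20)))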
| 342 | 1 |
import argparse
import os
import re

PATH_TO_AUTO_MODULE = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite=False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda snake_case__: _re_identifier.search(snake_case__).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite=False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
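# Typical usage (a sketch; the utils/ path is an assumption about where the
# script lives in the repository):
#
#   python utils/sort_auto_mappings.py --check_only   # only report unsorted mappings
#   python utils/sort_auto_mappings.py                # rewrite the files in place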
| 342 | def longest_distance(graph):
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
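# The function above is Kahn's topological BFS: long_dist[v] ends up as the
# number of vertices on the longest path ending at v. A brute-force cross-check
# (a sketch, not in the original file) — the longest chain in the sample DAG has
# 5 vertices (0 -> 2 -> 5 -> 6 -> 7), matching what longest_distance prints:
def _longest_chain(v):
    return 1 + max((_longest_chain(w) for w in graph[v]), default=0)


assert max(_longest_chain(v) for v in graph) == 5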
| 342 | 1 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
| 342 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
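# With the lazy structure above, importing the package stays cheap: heavy
# submodules are loaded on first attribute access. A usage sketch (assumes a
# torch install, so the torch branch of _import_structure is populated):
#
#   from transformers import WhisperConfig, WhisperForConditionalGeneration
#
#   model = WhisperForConditionalGeneration(WhisperConfig())  # triggers the real import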
| 342 | 1 |
import datasets
from .evaluate import evaluate
_CITATION = '''\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
'''
_DESCRIPTION = '''
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
'''
_KWARGS_DESCRIPTION = '''
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the SQuAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]
>>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]
>>> squad_metric = datasets.load_metric("squad")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
            reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
        )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 342 | from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
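    # Usage sketch: pass exactly one argument as 0 and the function solves for
    # it from X_L = 2 * pi * f * L (the numbers below are illustrative only).
    print(ind_reactance(0, 10e3, 50))  # solves for inductance
    print(ind_reactance(35e-3, 0, 50))  # solves for frequency
    print(ind_reactance(35e-3, 10e3, 0))  # solves for reactance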
| 342 | 1 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
| 342 | import flax.linen as nn
import jax
import jax.numpy as jnp


class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 342 | import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset
    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=False
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")
def _lowerCamelCase ( self: List[str] ) -> int:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__UpperCAmelCase , __UpperCAmelCase : Tuple = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
__UpperCAmelCase , __UpperCAmelCase : List[Any] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(__lowerCamelCase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def _lowerCamelCase ( self: List[str] ) -> Dict:
from elasticsearch import Elasticsearch
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
__UpperCAmelCase : int = {"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 30 )
__UpperCAmelCase : Dict = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
__UpperCAmelCase : Any = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=__lowerCamelCase )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: List[str] ) -> Optional[int]:
import faiss
__UpperCAmelCase : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
__UpperCAmelCase : Dict = np.zeros(5 , dtype=np.floataa )
__UpperCAmelCase : List[str] = 1
__UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(__lowerCamelCase )
self.assertRaises(__lowerCamelCase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
__UpperCAmelCase : List[str] = np.eye(5 , dtype=np.floataa )[::-1]
__UpperCAmelCase , __UpperCAmelCase : Any = index.search_batch(__lowerCamelCase )
self.assertRaises(__lowerCamelCase , index.search_batch , queries[0] )
__UpperCAmelCase : Dict = [scores[0] for scores in total_scores]
__UpperCAmelCase : int = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , __lowerCamelCase )
def _lowerCamelCase ( self: Any ) -> List[str]:
import faiss
__UpperCAmelCase : Dict = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
__UpperCAmelCase : Optional[Any] = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__lowerCamelCase ):
__UpperCAmelCase : Any = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def _lowerCamelCase ( self: List[str] ) -> Dict:
import faiss
__UpperCAmelCase : str = faiss.IndexFlat(5 )
__UpperCAmelCase : int = FaissIndex(custom_index=__lowerCamelCase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def _lowerCamelCase ( self: Union[str, Any] ) -> int:
import faiss
__UpperCAmelCase : Any = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file:
index.save(tmp_file.name )
__UpperCAmelCase : List[str] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
__UpperCAmelCase : Tuple = np.zeros(5 , dtype=np.floataa )
__UpperCAmelCase : Tuple = 1
__UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search(__lowerCamelCase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
__UpperCAmelCase : Optional[Any] = Elasticsearch()
__UpperCAmelCase : Dict = {"acknowledged": True}
__UpperCAmelCase : Any = ElasticSearchIndex(es_client=__lowerCamelCase )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(["foo", "bar", "foobar"] )
# single query
__UpperCAmelCase : Dict = "foo"
__UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = index.search(__lowerCamelCase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
__UpperCAmelCase : int = "foo"
__UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search(__lowerCamelCase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
__UpperCAmelCase : int = ["foo", "bar", "foobar"]
__UpperCAmelCase : Union[str, Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
__UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search_batch(__lowerCamelCase )
__UpperCAmelCase : Tuple = [scores[0] for scores in total_scores]
__UpperCAmelCase : Optional[int] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __lowerCamelCase )
# batched queries with timeout
__UpperCAmelCase : str = ["foo", "bar", "foobar"]
__UpperCAmelCase : Tuple = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search_batch(__lowerCamelCase , request_timeout=30 )
__UpperCAmelCase : Union[str, Any] = [scores[0] for scores in total_scores]
__UpperCAmelCase : List[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __lowerCamelCase )
| 342 | 1 |
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self: Optional[int] ) -> Optional[Any]:
__UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "size" ) )
def _lowerCamelCase ( self: Tuple ) -> List[str]:
__UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
__UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def _lowerCamelCase ( self: List[str] ) -> Dict:
# Initialize image_processing
__UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
__UpperCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
__UpperCAmelCase : Tuple = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _lowerCamelCase ( self: Optional[Any] ) -> Any:
# Initialize image_processing
__UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
__UpperCAmelCase : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
__UpperCAmelCase : Any = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _lowerCamelCase ( self: Dict ) -> Tuple:
# Initialize image_processing
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
__UpperCAmelCase : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
__UpperCAmelCase : Optional[Any] = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
| 342 | import argparse
import struct
import unittest
class SHAaaa:
    def __init__(self, data: bytes) -> None:
        self.data = data
# Initialize hash values
        self.hashes = [
0x6_A_0_9_E_6_6_7,
0xB_B_6_7_A_E_8_5,
0x3_C_6_E_F_3_7_2,
0xA_5_4_F_F_5_3_A,
0x5_1_0_E_5_2_7_F,
0x9_B_0_5_6_8_8_C,
0x1_F_8_3_D_9_A_B,
0x5_B_E_0_C_D_1_9,
]
# Initialize round constants
        self.round_constants = [
0x4_2_8_A_2_F_9_8,
0x7_1_3_7_4_4_9_1,
0xB_5_C_0_F_B_C_F,
0xE_9_B_5_D_B_A_5,
0x3_9_5_6_C_2_5_B,
0x5_9_F_1_1_1_F_1,
0x9_2_3_F_8_2_A_4,
0xA_B_1_C_5_E_D_5,
0xD_8_0_7_A_A_9_8,
0x1_2_8_3_5_B_0_1,
0x2_4_3_1_8_5_B_E,
0x5_5_0_C_7_D_C_3,
0x7_2_B_E_5_D_7_4,
0x8_0_D_E_B_1_F_E,
0x9_B_D_C_0_6_A_7,
0xC_1_9_B_F_1_7_4,
0xE_4_9_B_6_9_C_1,
0xE_F_B_E_4_7_8_6,
0x0_F_C_1_9_D_C_6,
0x2_4_0_C_A_1_C_C,
0x2_D_E_9_2_C_6_F,
0x4_A_7_4_8_4_A_A,
0x5_C_B_0_A_9_D_C,
0x7_6_F_9_8_8_D_A,
0x9_8_3_E_5_1_5_2,
0xA_8_3_1_C_6_6_D,
0xB_0_0_3_2_7_C_8,
0xB_F_5_9_7_F_C_7,
0xC_6_E_0_0_B_F_3,
0xD_5_A_7_9_1_4_7,
0x0_6_C_A_6_3_5_1,
0x1_4_2_9_2_9_6_7,
0x2_7_B_7_0_A_8_5,
0x2_E_1_B_2_1_3_8,
0x4_D_2_C_6_D_F_C,
0x5_3_3_8_0_D_1_3,
0x6_5_0_A_7_3_5_4,
0x7_6_6_A_0_A_B_B,
0x8_1_C_2_C_9_2_E,
0x9_2_7_2_2_C_8_5,
0xA_2_B_F_E_8_A_1,
0xA_8_1_A_6_6_4_B,
0xC_2_4_B_8_B_7_0,
0xC_7_6_C_5_1_A_3,
0xD_1_9_2_E_8_1_9,
0xD_6_9_9_0_6_2_4,
0xF_4_0_E_3_5_8_5,
0x1_0_6_A_A_0_7_0,
0x1_9_A_4_C_1_1_6,
0x1_E_3_7_6_C_0_8,
0x2_7_4_8_7_7_4_C,
0x3_4_B_0_B_C_B_5,
0x3_9_1_C_0_C_B_3,
0x4_E_D_8_A_A_4_A,
0x5_B_9_C_C_A_4_F,
0x6_8_2_E_6_F_F_3,
0x7_4_8_F_8_2_E_E,
0x7_8_A_5_6_3_6_F,
0x8_4_C_8_7_8_1_4,
0x8_C_C_7_0_2_0_8,
0x9_0_B_E_F_F_F_A,
0xA_4_5_0_6_C_E_B,
0xB_E_F_9_A_3_F_7,
0xC_6_7_1_7_8_F_2,
]
        self.preprocessed_data = self.preprocessing(self.data)
self.final_hash()
@staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )

                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHAaaaTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        data = bytes("Test String", "utf-8")
        self.assertEqual(SHAaaa(data).hash, hashlib.sha256(data).hexdigest())
def _UpperCamelCase ( ) -> None:
import doctest
doctest.testmod()
__UpperCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
parser.add_argument(
"-f", "--file", dest="input_file", help="Hash contents of a file" )
__UpperCAmelCase : List[Any] = parser.parse_args()
__UpperCAmelCase : Optional[int] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file, "rb" ) as f:
__UpperCAmelCase : List[str] = f.read()
else:
__UpperCAmelCase : List[Any] = bytes(snake_case__, "utf-8" )
print(SHAaaa(snake_case__ ).hash )
if __name__ == "__main__":
    _UpperCamelCase()
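# A minimal standalone sketch of the two SHA-256 primitives above (message
# padding and 32-bit rotate-right), with quick self-checks. The names below
# are illustrative stand-ins, not the class's own identifiers.
import struct

def sha256_pad_sketch(data: bytes) -> bytes:
    padding = b"\x80" + b"\x00" * (63 - (len(data) + 8) % 64)
    return data + padding + struct.pack(">Q", len(data) * 8)

def ror_sketch(value: int, rotations: int) -> int:
    return 0xFFFFFFFF & ((value << (32 - rotations)) | (value >> rotations))

assert len(sha256_pad_sketch(b"abc")) % 64 == 0  # padded input fills whole 64-byte blocks
assert ror_sketch(0x00000001, 1) == 0x80000000   # rotate-right wraps the low bit to the top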
| 342 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _snake_case ( _lowercase ):
lowerCamelCase__: Any = "naver-clova-ix/donut-base-finetuned-docvqa"
lowerCamelCase__: Union[str, Any] = (
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
lowerCamelCase__: int = "document_qa"
lowerCamelCase__: List[str] = AutoProcessor
lowerCamelCase__: List[str] = VisionEncoderDecoderModel
lowerCamelCase__: str = ["image", "text"]
lowerCamelCase__: Union[str, Any] = ["text"]
def __init__( self: str , *__lowerCamelCase: List[Any] , **__lowerCamelCase: Optional[int] ) -> List[str]:
if not is_vision_available():
raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool." )
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: "Image" , __lowerCamelCase: str ) -> int:
__UpperCAmelCase : Optional[int] = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
__UpperCAmelCase : Union[str, Any] = task_prompt.replace("{user_input}" , __lowerCamelCase )
__UpperCAmelCase : List[str] = self.pre_processor.tokenizer(
__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors="pt" ).input_ids
__UpperCAmelCase : Union[str, Any] = self.pre_processor(__lowerCamelCase , return_tensors="pt" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def _lowerCamelCase ( self: Any , __lowerCamelCase: List[str] ) -> Optional[Any]:
return self.model.generate(
inputs["pixel_values"].to(self.device ) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__lowerCamelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__lowerCamelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__lowerCamelCase , ).sequences
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Any ) -> Union[str, Any]:
__UpperCAmelCase : str = self.pre_processor.batch_decode(__lowerCamelCase )[0]
__UpperCAmelCase : Tuple = sequence.replace(self.pre_processor.tokenizer.eos_token , "" )
__UpperCAmelCase : Optional[Any] = sequence.replace(self.pre_processor.tokenizer.pad_token , "" )
__UpperCAmelCase : Optional[Any] = re.sub(R"<.*?>" , "" , __lowerCamelCase , count=1 ).strip() # remove first task start token
        __UpperCAmelCase : Union[str, Any] = self.pre_processor.token2json(__lowerCamelCase )
return sequence["answer"]
| 342 | import numpy as np
import datasets
_snake_case = '''
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
_snake_case = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
_snake_case = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def _lowerCamelCase ( self: List[str] ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: Union[str, Any] ) -> List[str]:
# convert to numpy arrays
__UpperCAmelCase : int = np.array(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = np.array(__lowerCamelCase )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("Expected `X` to be a 2D vector" )
if len(reference_distribution.shape ) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
# Get mahalanobis distance for each prediction
__UpperCAmelCase : str = X - np.mean(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = np.cov(reference_distribution.T )
try:
__UpperCAmelCase : int = np.linalg.inv(__lowerCamelCase )
except np.linalg.LinAlgError:
__UpperCAmelCase : Optional[int] = np.linalg.pinv(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = np.dot(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Optional[int] = np.dot(__lowerCamelCase , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 342 | 1 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
_snake_case = logging.get_logger(__name__)
class _snake_case ( _lowercase ):
lowerCamelCase__: Dict = ["pixel_values"]
def __init__( self: Optional[int] , __lowerCamelCase: bool = True , __lowerCamelCase: Union[int, float] = 1 / 2_55 , __lowerCamelCase: bool = True , __lowerCamelCase: int = 8 , **__lowerCamelCase: int , ) -> None:
super().__init__(**__lowerCamelCase )
__UpperCAmelCase : str = do_rescale
__UpperCAmelCase : Dict = rescale_factor
__UpperCAmelCase : Optional[int] = do_pad
__UpperCAmelCase : Tuple = pad_size
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: np.ndarray , __lowerCamelCase: float , __lowerCamelCase: Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase: List[str] ) -> np.ndarray:
return rescale(__lowerCamelCase , scale=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: np.ndarray , __lowerCamelCase: int , __lowerCamelCase: Optional[Union[str, ChannelDimension]] = None ) -> Optional[int]:
__UpperCAmelCase , __UpperCAmelCase : Dict = get_image_size(__lowerCamelCase )
__UpperCAmelCase : Dict = (old_height // size + 1) * size - old_height
__UpperCAmelCase : Optional[Any] = (old_width // size + 1) * size - old_width
return pad(__lowerCamelCase , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=__lowerCamelCase )
def _lowerCamelCase ( self: int , __lowerCamelCase: ImageInput , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: Optional[float] = None , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: Optional[Union[str, TensorType]] = None , __lowerCamelCase: Union[str, ChannelDimension] = ChannelDimension.FIRST , **__lowerCamelCase: List[str] , ) -> Union[str, Any]:
__UpperCAmelCase : str = do_rescale if do_rescale is not None else self.do_rescale
__UpperCAmelCase : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCAmelCase : str = do_pad if do_pad is not None else self.do_pad
__UpperCAmelCase : int = pad_size if pad_size is not None else self.pad_size
__UpperCAmelCase : Dict = make_list_of_images(__lowerCamelCase )
if not valid_images(__lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
__UpperCAmelCase : Dict = [to_numpy_array(__lowerCamelCase ) for image in images]
if do_rescale:
__UpperCAmelCase : List[Any] = [self.rescale(image=__lowerCamelCase , scale=__lowerCamelCase ) for image in images]
if do_pad:
__UpperCAmelCase : int = [self.pad(__lowerCamelCase , size=__lowerCamelCase ) for image in images]
__UpperCAmelCase : Dict = [to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase ) for image in images]
__UpperCAmelCase : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase )
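# Standalone NumPy sketch of the symmetric pad step above, for a 2-D
# grayscale array (the class handles channel layout separately). Note the
# arithmetic always pads a full extra block when a side is already a
# multiple of `size`, mirroring the method above.
import numpy as np

def pad_to_multiple_sketch(image: np.ndarray, size: int = 8) -> np.ndarray:
    old_height, old_width = image.shape[:2]
    pad_height = (old_height // size + 1) * size - old_height
    pad_width = (old_width // size + 1) * size - old_width
    return np.pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric")

assert pad_to_multiple_sketch(np.zeros((10, 13))).shape == (16, 16)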
| 342 | import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class _snake_case ( unittest.TestCase ):
def __init__( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict=13 , __lowerCamelCase: List[str]=7 , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: int=True , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Tuple=99 , __lowerCamelCase: List[str]=32 , __lowerCamelCase: Optional[Any]=5 , __lowerCamelCase: List[str]=4 , __lowerCamelCase: str=37 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: int=0.1 , __lowerCamelCase: Optional[Any]=0.1 , __lowerCamelCase: Tuple=5_12 , __lowerCamelCase: int=16 , __lowerCamelCase: str=2 , __lowerCamelCase: Optional[Any]=0.02 , __lowerCamelCase: Optional[Any]=4 , ) -> str:
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : Optional[int] = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[str] = use_attention_mask
__UpperCAmelCase : Dict = use_token_type_ids
__UpperCAmelCase : Optional[int] = use_labels
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : Dict = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : Tuple = intermediate_size
__UpperCAmelCase : Union[str, Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : str = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = max_position_embeddings
__UpperCAmelCase : Optional[int] = type_vocab_size
__UpperCAmelCase : str = type_sequence_label_size
__UpperCAmelCase : Tuple = initializer_range
__UpperCAmelCase : str = num_choices
def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
__UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : str = None
if self.use_attention_mask:
__UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : Any = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__lowerCamelCase , )
return config, input_ids, attention_mask
def _lowerCamelCase ( self: str ) -> Any:
__UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = config_and_inputs
__UpperCAmelCase : Any = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: str = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def _lowerCamelCase ( self: List[Any] ) -> Dict:
__UpperCAmelCase : Union[str, Any] = FlaxDistilBertModelTester(self )
@slow
def _lowerCamelCase ( self: Tuple ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
__UpperCAmelCase : Optional[int] = model_class_name.from_pretrained("distilbert-base-uncased" )
__UpperCAmelCase : Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(__lowerCamelCase )
@require_flax
class _snake_case ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self: int ) -> List[Any]:
__UpperCAmelCase : Dict = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased" )
__UpperCAmelCase : Any = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__UpperCAmelCase : Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__UpperCAmelCase : int = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
__UpperCAmelCase : str = (1, 11, 7_68)
self.assertEqual(output.shape , __lowerCamelCase )
__UpperCAmelCase : Optional[int] = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1e-4 ) )
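# Hedged NumPy sketch of the dummy-input helpers the tester above relies on.
# The real `ids_tensor` / `random_attention_mask` live in the shared test
# utilities; these stand-ins only mimic their shapes and value ranges.
import numpy as np

def ids_tensor_sketch(shape, vocab_size, seed=0):
    rng = np.random.default_rng(seed)
    return rng.integers(0, vocab_size, size=shape, dtype=np.int32)

def random_attention_mask_sketch(shape, seed=0):
    mask = ids_tensor_sketch(shape, vocab_size=2, seed=seed)
    mask[:, -1] = 1  # guarantee at least one attended position per row
    return mask

assert ids_tensor_sketch((13, 7), vocab_size=99).shape == (13, 7)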
| 342 | 1 |
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ ) -> int:
if index == number_of_items:
return 0
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : Optional[Any] = 0
    __UpperCAmelCase : Any = _UpperCamelCase(snake_case__, snake_case__, snake_case__, snake_case__, index + 1 )
if weights[index] <= max_weight:
        __UpperCAmelCase : Dict = values[index] + _UpperCamelCase(
            snake_case__, snake_case__, snake_case__, max_weight - weights[index], index + 1 )
return max(snake_case__, snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
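# Clean, runnable restatement of the 0/1-knapsack recursion above; the
# argument names are inferred from the body, since the original signature
# is obfuscated.
def knapsack_sketch(weights, values, number_of_items, max_weight, index=0):
    if index == number_of_items:
        return 0
    ans_without = knapsack_sketch(weights, values, number_of_items, max_weight, index + 1)
    ans_with = 0
    if weights[index] <= max_weight:
        ans_with = values[index] + knapsack_sketch(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans_without, ans_with)

assert knapsack_sketch([1, 3, 4, 5], [1, 4, 5, 7], 4, 7) == 9  # pick the weight-3 and weight-4 items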
| 342 | import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
_snake_case = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
_snake_case = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
_snake_case = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
_snake_case = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
_snake_case = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Any:
for tf_name, hf_name in patterns:
__UpperCAmelCase : Optional[int] = k.replace(snake_case__, snake_case__ )
return k
def _UpperCamelCase ( snake_case__, snake_case__ ) -> BigBirdPegasusForConditionalGeneration:
__UpperCAmelCase : Dict = BigBirdPegasusConfig(**snake_case__ )
__UpperCAmelCase : Dict = BigBirdPegasusForConditionalGeneration(snake_case__ )
__UpperCAmelCase : Optional[Any] = torch_model.state_dict()
__UpperCAmelCase : Optional[int] = {}
# separating decoder weights
__UpperCAmelCase : List[Any] = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder" )}
__UpperCAmelCase : str = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder" )}
for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion" ):
        __UpperCAmelCase : Optional[int] = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(snake_case__ ):
continue
__UpperCAmelCase : List[str] = DECODER_PATTERNS
__UpperCAmelCase : str = rename_state_dict_key(snake_case__, snake_case__ )
if new_k not in state_dict:
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if any(i in k for i in ["dense", "query", "key", "value"] ):
__UpperCAmelCase : Optional[int] = v.T
__UpperCAmelCase : str = torch.from_numpy(snake_case__ )
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion" ):
        __UpperCAmelCase : int = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(snake_case__ ):
continue
__UpperCAmelCase : Optional[Any] = REMAINING_PATTERNS
__UpperCAmelCase : Optional[int] = rename_state_dict_key(snake_case__, snake_case__ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if any(i in k for i in ["dense", "query", "key", "value"] ):
__UpperCAmelCase : List[Any] = v.T
__UpperCAmelCase : List[str] = torch.from_numpy(snake_case__ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
__UpperCAmelCase : List[Any] = mapping["model.embed_positions.weight"]
__UpperCAmelCase : Optional[Any] = mapping.pop("model.embed_positions.weight" )
__UpperCAmelCase , __UpperCAmelCase : Any = torch_model.load_state_dict(snake_case__, strict=snake_case__ )
__UpperCAmelCase : str = [
k
for k in missing
if k
not in [
"final_logits_bias",
"model.encoder.embed_tokens.weight",
"model.decoder.embed_tokens.weight",
"lm_head.weight",
]
]
assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], f'''no matches found for the following tf keys {extra}'''
return torch_model
def _UpperCamelCase ( snake_case__ ) -> Dict:
__UpperCAmelCase : Tuple = tf.train.list_variables(snake_case__ )
__UpperCAmelCase : List[str] = {}
__UpperCAmelCase : str = ["global_step"]
for name, shape in tqdm(snake_case__, desc="converting tf checkpoint to dict" ):
__UpperCAmelCase : Tuple = any(pat in name for pat in ignore_name )
if skip_key:
continue
__UpperCAmelCase : Optional[Any] = tf.train.load_variable(snake_case__, snake_case__ )
__UpperCAmelCase : Tuple = array
return tf_weights
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Dict:
__UpperCAmelCase : str = get_tf_weights_as_numpy(snake_case__ )
__UpperCAmelCase : List[Any] = convert_bigbird_pegasus(snake_case__, snake_case__ )
torch_model.save_pretrained(snake_case__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
_snake_case = parser.parse_args()
_snake_case = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
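# Toy demonstration of the pattern-based key renaming used above; the
# pattern subset is illustrative, not the full INIT_COMMON list.
def rename_key_sketch(key: str, patterns) -> str:
    for tf_name, hf_name in patterns:
        key = key.replace(tf_name, hf_name)
    return key

demo_patterns = [("/", "."), ("layer_", "layers."), ("kernel", "weight")]
assert rename_key_sketch("encoder/layer_0/kernel", demo_patterns) == "encoder.layers.0.weight"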
| 342 | 1 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: str = RobertaTokenizer
lowerCamelCase__: Tuple = RobertaTokenizerFast
lowerCamelCase__: int = True
lowerCamelCase__: List[str] = {"cls_token": "<s>"}
def _lowerCamelCase ( self: int ) -> List[str]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__UpperCAmelCase : str = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
__UpperCAmelCase : str = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
__UpperCAmelCase : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
__UpperCAmelCase : int = {"unk_token": "<unk>"}
__UpperCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__UpperCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__lowerCamelCase ) )
def _lowerCamelCase ( self: int , **__lowerCamelCase: Optional[Any] ) -> Any:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def _lowerCamelCase ( self: Optional[int] , **__lowerCamelCase: List[Any] ) -> Optional[Any]:
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: Optional[int] ) -> int:
__UpperCAmelCase : str = "lower newer"
__UpperCAmelCase : Dict = "lower newer"
return input_text, output_text
def _lowerCamelCase ( self: Optional[Any] ) -> str:
__UpperCAmelCase : Dict = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__UpperCAmelCase : Optional[int] = "lower newer"
__UpperCAmelCase : List[Any] = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
__UpperCAmelCase : Dict = tokenizer.tokenize(__lowerCamelCase ) # , add_prefix_space=True)
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Tuple = tokens + [tokenizer.unk_token]
__UpperCAmelCase : List[str] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )
def _lowerCamelCase ( self: List[str] ) -> List[str]:
__UpperCAmelCase : Optional[int] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def _lowerCamelCase ( self: Union[str, Any] ) -> int:
__UpperCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained("roberta-base" )
__UpperCAmelCase : Union[str, Any] = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : str = tokenizer.encode(
"sequence builders" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
__UpperCAmelCase : Optional[int] = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
__UpperCAmelCase : List[Any] = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
__UpperCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _lowerCamelCase ( self: Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase : Optional[Any] = self.get_tokenizer()
__UpperCAmelCase : Optional[Any] = "Encode this sequence."
__UpperCAmelCase : int = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
__UpperCAmelCase : Dict = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
__UpperCAmelCase : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : List[Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
__UpperCAmelCase : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
__UpperCAmelCase : List[Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
# Testing spaces after special tokens
__UpperCAmelCase : Optional[int] = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )} ) # mask token has a left space
__UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = "Encode <mask> sequence"
__UpperCAmelCase : List[Any] = "Encode <mask>sequence"
__UpperCAmelCase : str = tokenizer.encode(__lowerCamelCase )
__UpperCAmelCase : int = encoded.index(__lowerCamelCase )
__UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Optional[int] = tokenizer.encode(__lowerCamelCase )
__UpperCAmelCase : Dict = encoded.index(__lowerCamelCase )
__UpperCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( self: Optional[int] ) -> int:
pass
def _lowerCamelCase ( self: Union[str, Any] ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
__UpperCAmelCase : Tuple = "A, <mask> AllenNLP sentence."
__UpperCAmelCase : Dict = tokenizer_r.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
__UpperCAmelCase : Optional[int] = tokenizer_p.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
__UpperCAmelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
__UpperCAmelCase : Optional[int] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def _lowerCamelCase ( self: Tuple ) -> Tuple:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
__UpperCAmelCase : int = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__UpperCAmelCase : int = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["trim_offsets"] , __lowerCamelCase )
def _lowerCamelCase ( self: Dict ) -> str:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase : Dict = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
__UpperCAmelCase : Optional[int] = f'''{text_of_1_token} {text_of_1_token}'''
__UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
__UpperCAmelCase : Any = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
__UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
__UpperCAmelCase : List[str] = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
__UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
__UpperCAmelCase : Tuple = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
__UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
__UpperCAmelCase : Union[str, Any] = f''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__UpperCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
__UpperCAmelCase : List[Any] = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ) + 1, 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
__UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
__UpperCAmelCase : Dict = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
__UpperCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
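# The setUp above can be reproduced standalone: write the toy vocab/merges
# files to disk and load a slow RobertaTokenizer from them (no network
# needed). The expected tokenization matches the assertion in the tests.
import json, os, tempfile
from transformers import RobertaTokenizer

toy_vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l",
             "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer",
             "\u0120wider", "<unk>"]
toy_merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
tmp_dir = tempfile.mkdtemp()
vocab_path = os.path.join(tmp_dir, "vocab.json")
merges_path = os.path.join(tmp_dir, "merges.txt")
with open(vocab_path, "w", encoding="utf-8") as fp:
    json.dump(dict(zip(toy_vocab, range(len(toy_vocab)))), fp)
with open(merges_path, "w", encoding="utf-8") as fp:
    fp.write("\n".join(toy_merges))
toy_tokenizer = RobertaTokenizer(vocab_path, merges_path, unk_token="<unk>")
assert toy_tokenizer.tokenize("lower newer") == ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]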
| 342 | import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class _snake_case ( _lowercase ):
lowerCamelCase__: Any = ["image_processor", "tokenizer"]
lowerCamelCase__: Optional[Any] = "BlipImageProcessor"
lowerCamelCase__: Optional[int] = "AutoTokenizer"
def __init__( self: List[str] , __lowerCamelCase: str , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[Any] ) -> Dict:
super().__init__(__lowerCamelCase , __lowerCamelCase )
# add QFormer tokenizer
__UpperCAmelCase : Dict = qformer_tokenizer
def __call__( self: Any , __lowerCamelCase: ImageInput = None , __lowerCamelCase: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCamelCase: bool = True , __lowerCamelCase: Union[bool, str, PaddingStrategy] = False , __lowerCamelCase: Union[bool, str, TruncationStrategy] = None , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: int = 0 , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = True , __lowerCamelCase: Optional[Union[str, TensorType]] = None , **__lowerCamelCase: Dict , ) -> BatchFeature:
if images is None and text is None:
raise ValueError("You have to specify at least images or text." )
__UpperCAmelCase : str = BatchFeature()
if text is not None:
__UpperCAmelCase : Any = self.tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
encoding.update(__lowerCamelCase )
__UpperCAmelCase : Dict = self.qformer_tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
__UpperCAmelCase : int = qformer_text_encoding.pop("input_ids" )
__UpperCAmelCase : Optional[int] = qformer_text_encoding.pop("attention_mask" )
if images is not None:
__UpperCAmelCase : Union[str, Any] = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase )
encoding.update(__lowerCamelCase )
return encoding
def _lowerCamelCase ( self: Any , *__lowerCamelCase: Any , **__lowerCamelCase: Any ) -> Optional[Any]:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( self: Tuple , *__lowerCamelCase: Any , **__lowerCamelCase: Dict ) -> Tuple:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _lowerCamelCase ( self: List[str] ) -> Tuple:
__UpperCAmelCase : str = self.tokenizer.model_input_names
__UpperCAmelCase : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Union[str, Any] , **__lowerCamelCase: Optional[Any] ) -> str:
if os.path.isfile(__lowerCamelCase ):
raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
__UpperCAmelCase : List[str] = os.path.join(__lowerCamelCase , "qformer_tokenizer" )
self.qformer_tokenizer.save_pretrained(__lowerCamelCase )
return super().save_pretrained(__lowerCamelCase , **__lowerCamelCase )
@classmethod
def _lowerCamelCase ( cls: Tuple , __lowerCamelCase: Tuple , **__lowerCamelCase: Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(__lowerCamelCase , subfolder="qformer_tokenizer" )
__UpperCAmelCase : List[Any] = cls._get_arguments_from_pretrained(__lowerCamelCase , **__lowerCamelCase )
args.append(__lowerCamelCase )
return cls(*__lowerCamelCase )
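# Minimal sketch of the dual-encoding pattern above: one text encoding for
# the language model and a second for the Q-Former, merged under `qformer_*`
# keys (the key names follow the upstream InstructBLIP processor). Any pair
# of HF tokenizers can stand in for the two arguments.
def dual_encode_sketch(tokenizer, qformer_tokenizer, text, **kwargs):
    encoding = dict(tokenizer(text, **kwargs))
    qformer_encoding = qformer_tokenizer(text, **kwargs)
    encoding["qformer_input_ids"] = qformer_encoding["input_ids"]
    encoding["qformer_attention_mask"] = qformer_encoding["attention_mask"]
    return encoding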
| 342 | 1 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
_snake_case = [
'''python''',
'''tqdm''',
'''regex''',
'''requests''',
'''packaging''',
'''filelock''',
'''numpy''',
'''tokenizers''',
'''huggingface-hub''',
'''safetensors''',
'''accelerate''',
'''pyyaml''',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def _UpperCamelCase ( snake_case__, snake_case__=None ) -> List[Any]:
require_version(deps[pkg], snake_case__ )
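# A generic sketch (not the library's actual helper) of a runtime version
# check built on importlib.metadata and packaging, the same primitives the
# checks above rely on:
import re
from importlib.metadata import PackageNotFoundError, version
from packaging.specifiers import SpecifierSet

def check_requirement_sketch(requirement: str) -> None:
    name, spec = re.match(r"^([A-Za-z0-9_.\-]+)(.*)$", requirement).groups()
    try:
        installed = version(name)
    except PackageNotFoundError as err:
        raise ImportError(f"{name} is required but not installed") from err
    if spec and installed not in SpecifierSet(spec.strip()):
        raise ImportError(f"{requirement} is required, found {name}=={installed}")

check_requirement_sketch("packaging>=20.0")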
| 342 | import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
_snake_case = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
_snake_case = {'''facebook/blenderbot-3B''': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _UpperCamelCase ( ) -> Dict:
__UpperCAmelCase : Tuple = (
list(range(ord("!" ), ord("~" ) + 1 ) ) + list(range(ord("¡" ), ord("¬" ) + 1 ) ) + list(range(ord("®" ), ord("ÿ" ) + 1 ) )
)
__UpperCAmelCase : str = bs[:]
__UpperCAmelCase : Any = 0
for b in range(2**8 ):
if b not in bs:
bs.append(snake_case__ )
cs.append(2**8 + n )
n += 1
__UpperCAmelCase : Optional[Any] = [chr(snake_case__ ) for n in cs]
return dict(zip(snake_case__, snake_case__ ) )
def _UpperCamelCase ( snake_case__ ) -> Any:
__UpperCAmelCase : List[Any] = set()
__UpperCAmelCase : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__UpperCAmelCase : Union[str, Any] = char
return pairs
class _snake_case ( _lowercase ):
lowerCamelCase__: str = VOCAB_FILES_NAMES
lowerCamelCase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__: Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__: Dict = ["input_ids", "attention_mask"]
def __init__( self: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]="replace" , __lowerCamelCase: List[str]="<s>" , __lowerCamelCase: List[str]="</s>" , __lowerCamelCase: str="</s>" , __lowerCamelCase: Tuple="<s>" , __lowerCamelCase: Optional[int]="<unk>" , __lowerCamelCase: Any="<pad>" , __lowerCamelCase: List[str]="<mask>" , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int , ) -> List[str]:
__UpperCAmelCase : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
__UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
__UpperCAmelCase : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
__UpperCAmelCase : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
__UpperCAmelCase : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
__UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase : Dict = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , )
with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
__UpperCAmelCase : List[Any] = json.load(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = {v: k for k, v in self.encoder.items()}
__UpperCAmelCase : Dict = errors # how to handle errors in decoding
__UpperCAmelCase : Optional[int] = bytes_to_unicode()
__UpperCAmelCase : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle:
__UpperCAmelCase : List[Any] = merges_handle.read().split("\n" )[1:-1]
__UpperCAmelCase : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
__UpperCAmelCase : int = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
__UpperCAmelCase : List[Any] = {}
__UpperCAmelCase : Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__UpperCAmelCase : int = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def _lowerCamelCase ( self: Dict ) -> Any:
return len(self.encoder )
def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
return dict(self.encoder , **self.added_tokens_encoder )
def _lowerCamelCase ( self: int , __lowerCamelCase: List[Any] ) -> Union[str, Any]:
if token in self.cache:
return self.cache[token]
__UpperCAmelCase : List[Any] = tuple(__lowerCamelCase )
__UpperCAmelCase : Dict = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
__UpperCAmelCase : Optional[int] = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = bigram
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : str = 0
while i < len(__lowerCamelCase ):
try:
__UpperCAmelCase : Union[str, Any] = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCAmelCase : Union[str, Any] = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCAmelCase : List[Any] = tuple(__lowerCamelCase )
__UpperCAmelCase : str = new_word
if len(__lowerCamelCase ) == 1:
break
else:
__UpperCAmelCase : Optional[Any] = get_pairs(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = " ".join(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = word
return word
def _lowerCamelCase ( self: Dict , __lowerCamelCase: Optional[Any] ) -> Dict:
__UpperCAmelCase : Any = []
for token in re.findall(self.pat , __lowerCamelCase ):
__UpperCAmelCase : int = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) )
return bpe_tokens
def _lowerCamelCase ( self: int , __lowerCamelCase: str ) -> Dict:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[Any] ) -> List[str]:
return self.decoder.get(__lowerCamelCase )
def _lowerCamelCase ( self: Any , __lowerCamelCase: Any ) -> int:
__UpperCAmelCase : Dict = "".join(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCAmelCase : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__UpperCAmelCase : Dict = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" )
__UpperCAmelCase : Optional[Any] = 0
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
__UpperCAmelCase : Optional[Any] = token_index
writer.write(" ".join(__lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def _lowerCamelCase ( self: Dict , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None , __lowerCamelCase: bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase : int = [self.sep_token_id]
__UpperCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCamelCase ( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int ) -> List[Any]:
__UpperCAmelCase : Optional[Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()):
__UpperCAmelCase : Optional[Any] = " " + text
return (text, kwargs)
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[str]:
return token_ids_a + [self.eos_token_id]
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: "Conversation" ) -> List[int]:
__UpperCAmelCase : Tuple = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = " ".join(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.encode(__lowerCamelCase )
if len(__lowerCamelCase ) > self.model_max_length:
__UpperCAmelCase : List[Any] = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
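# A compact, runnable sketch of the byte-pair merge loop implemented by the
# `bpe` method above, using a toy ranks table; the merge-all-occurrences
# behavior matches the original loop.
def bpe_sketch(token: str, bpe_ranks: dict) -> str:
    word = tuple(token)
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
    return " ".join(word)

assert bpe_sketch("lower", {("l", "o"): 0, ("lo", "w"): 1}) == "low e r"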
| 342 | 1 |
def _UpperCamelCase ( snake_case__ = 10**9 ) -> int:
__UpperCAmelCase : Dict = 1
__UpperCAmelCase : int = 2
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : int = 0
__UpperCAmelCase : List[str] = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
__UpperCAmelCase : Dict = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(F'{solution() = }')
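# Brute-force cross-check sketch, assuming the recurrence above enumerates
# "almost equilateral" Heronian triangles (a, a, a +/- 1) as in Project
# Euler problem 94; for small bounds the two approaches should agree.
from math import isqrt

def perimeter_sum_brute_force(max_perimeter: int) -> int:
    total = 0
    for a in range(2, max_perimeter // 3 + 2):
        for b in (a - 1, a + 1):
            perimeter = 2 * a + b
            if perimeter > max_perimeter:
                continue
            squared = b * b * (4 * a * a - b * b)  # equals 16 * area^2 by Heron's formula
            root = isqrt(squared)
            if root * root == squared and root % 4 == 0:
                total += perimeter  # integral area => count this triangle
    return total

assert perimeter_sum_brute_force(1_000) == 16 + 50 + 196 + 722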
| 342 | import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: List[Any] = CanineTokenizer
lowerCamelCase__: Optional[int] = False
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
super().setUp()
__UpperCAmelCase : Tuple = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowerCamelCase ( self: Union[str, Any] ) -> List[Any]:
return CanineTokenizer.from_pretrained("google/canine-s" )
def _lowerCamelCase ( self: Any , **__lowerCamelCase: List[Any] ) -> CanineTokenizer:
__UpperCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
__UpperCAmelCase : Optional[int] = 10_24
return tokenizer
@require_torch
def _lowerCamelCase ( self: List[str] ) -> int:
__UpperCAmelCase : Union[str, Any] = self.canine_tokenizer
__UpperCAmelCase : List[str] = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
__UpperCAmelCase : Dict = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
# fmt: on
__UpperCAmelCase : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Optional[Any] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
__UpperCAmelCase : Optional[Any] = self.canine_tokenizer
__UpperCAmelCase : Dict = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
__UpperCAmelCase : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , __lowerCamelCase )
self.assertIn("attention_mask" , __lowerCamelCase )
self.assertIn("token_type_ids" , __lowerCamelCase )
@require_torch
def _lowerCamelCase ( self: Any ) -> List[str]:
__UpperCAmelCase : Optional[Any] = self.canine_tokenizer
__UpperCAmelCase : int = [
"What's the weater?",
"It's about 25 degrees.",
]
__UpperCAmelCase : List[Any] = tokenizer(
text_target=__lowerCamelCase , max_length=32 , padding="max_length" , truncation=__lowerCamelCase , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
# safety check on max_len default value so we are sure the test works
__UpperCAmelCase : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__UpperCAmelCase : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCAmelCase : int = tempfile.mkdtemp()
__UpperCAmelCase : List[Any] = " He is very happy, UNwant\u00E9d,running"
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
__UpperCAmelCase : Tuple = tokenizer.__class__.from_pretrained(__lowerCamelCase )
__UpperCAmelCase : Dict = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
shutil.rmtree(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCAmelCase : List[Any] = tempfile.mkdtemp()
__UpperCAmelCase : Optional[int] = " He is very happy, UNwant\u00E9d,running"
__UpperCAmelCase : str = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
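                # (0xE007 lies in Unicode's private use area, safely past the
                # codepoints Canine already reserves for its built-in special tokens.)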
__UpperCAmelCase : Tuple = chr(0xE_0_0_7 )
additional_special_tokens.append(__lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
__UpperCAmelCase : Optional[int] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
__UpperCAmelCase : str = tokenizer.__class__.from_pretrained(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertIn(__lowerCamelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__UpperCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(__lowerCamelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__lowerCamelCase )
def _lowerCamelCase ( self: str ) -> Optional[int]:
__UpperCAmelCase : List[Any] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.get_clean_sequence(__lowerCamelCase )
# a special token for Canine can be defined as follows:
__UpperCAmelCase : int = 0xE_0_0_5
__UpperCAmelCase : Tuple = chr(__lowerCamelCase )
tokenizer.add_special_tokens({"cls_token": special_token} )
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , 1 )
__UpperCAmelCase : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : Dict = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : int = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , input_encoded + special_token_id )
__UpperCAmelCase : Optional[int] = tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
self.assertTrue(special_token not in decoded )
def _lowerCamelCase ( self: Optional[int] ) -> Optional[Any]:
__UpperCAmelCase : List[str] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : Optional[int] = chr(0xE_0_0_5 )
__UpperCAmelCase : List[str] = chr(0xE_0_0_6 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__lowerCamelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
__UpperCAmelCase : Tuple = tokenizer.tokenize(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , 1 )
self.assertEqual(len(__lowerCamelCase ) , 1 )
self.assertEqual(token_a[0] , __lowerCamelCase )
self.assertEqual(token_a[0] , __lowerCamelCase )
@require_tokenizers
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
__UpperCAmelCase : Any = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
__UpperCAmelCase : Union[str, Any] = 0xE_0_0_6
__UpperCAmelCase : int = chr(__lowerCamelCase )
__UpperCAmelCase : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__lowerCamelCase )
tokenizer.from_pretrained(__lowerCamelCase )
def _lowerCamelCase ( self: Dict ) -> List[str]:
__UpperCAmelCase : str = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
__UpperCAmelCase : Tuple = json.load(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
__UpperCAmelCase : Optional[int] = json.load(__lowerCamelCase )
# a special token for Canine can be defined as follows:
__UpperCAmelCase : Any = 0xE_0_0_6
__UpperCAmelCase : Union[str, Any] = chr(__lowerCamelCase )
__UpperCAmelCase : Dict = [new_token_a]
__UpperCAmelCase : int = [new_token_a]
with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__UpperCAmelCase : List[str] = tokenizer_class.from_pretrained(__lowerCamelCase , extra_ids=0 )
self.assertIn(__lowerCamelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
__UpperCAmelCase : List[Any] = 0xE_0_0_7
__UpperCAmelCase : List[Any] = chr(__lowerCamelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__UpperCAmelCase : str = [AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase )]
__UpperCAmelCase : Dict = tokenizer_class.from_pretrained(
__lowerCamelCase , additional_special_tokens=__lowerCamelCase , extra_ids=0 )
self.assertIn(__lowerCamelCase , tokenizer.additional_special_tokens )
                # self.assertIn(new_token_2, tokenizer.get_vocab())  # CanineTokenizer has no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : Optional[int] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : int = "hello world"
if self.space_between_special_tokens:
__UpperCAmelCase : Any = "[CLS] hello world [SEP]"
else:
__UpperCAmelCase : Union[str, Any] = input
__UpperCAmelCase : List[Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : Any = tokenizer.decode(__lowerCamelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__lowerCamelCase , [output, output.lower()] )
def _lowerCamelCase ( self: Dict ) -> Any:
__UpperCAmelCase : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : List[str] = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
__UpperCAmelCase : List[str] = "a"
__UpperCAmelCase : Any = ord(__lowerCamelCase )
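                # For Canine every character is a valid token and its id is simply its
                # Unicode codepoint, so ord("a") can serve directly as a token id.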
for attr in attributes_list:
setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase )
setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase )
setattr(__lowerCamelCase , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [] )
__UpperCAmelCase : Tuple = 0xE_0_0_6
__UpperCAmelCase : Optional[Any] = chr(__lowerCamelCase )
setattr(__lowerCamelCase , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
pass
def _lowerCamelCase ( self: Any ) -> Any:
pass
def _lowerCamelCase ( self: Union[str, Any] ) -> Tuple:
pass
def _lowerCamelCase ( self: Optional[int] ) -> Any:
pass
def _lowerCamelCase ( self: List[str] ) -> str:
pass
def _lowerCamelCase ( self: Union[str, Any] ) -> Optional[int]:
pass
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
pass
def _lowerCamelCase ( self: str ) -> Tuple:
pass
| 342 | 1 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: Any ) -> List[Any]:
__UpperCAmelCase : Union[str, Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__lowerCamelCase , "tf_padding" ) )
self.parent.assertTrue(hasattr(__lowerCamelCase , "depth_multiplier" ) )
class _snake_case :
def __init__( self: List[Any] , __lowerCamelCase: List[Any] , __lowerCamelCase: Optional[int]=13 , __lowerCamelCase: Tuple=3 , __lowerCamelCase: Any=32 , __lowerCamelCase: List[str]=0.25 , __lowerCamelCase: Optional[Any]=8 , __lowerCamelCase: str=True , __lowerCamelCase: Optional[Any]=10_24 , __lowerCamelCase: Tuple=32 , __lowerCamelCase: Tuple="relu6" , __lowerCamelCase: Any=0.1 , __lowerCamelCase: str=0.02 , __lowerCamelCase: str=True , __lowerCamelCase: str=True , __lowerCamelCase: List[Any]=10 , __lowerCamelCase: List[Any]=None , ) -> Tuple:
__UpperCAmelCase : List[Any] = parent
__UpperCAmelCase : Any = batch_size
__UpperCAmelCase : Any = num_channels
__UpperCAmelCase : Optional[int] = image_size
__UpperCAmelCase : List[str] = depth_multiplier
__UpperCAmelCase : str = min_depth
__UpperCAmelCase : str = tf_padding
__UpperCAmelCase : List[str] = int(last_hidden_size * depth_multiplier )
__UpperCAmelCase : str = output_stride
__UpperCAmelCase : Any = hidden_act
__UpperCAmelCase : Union[str, Any] = classifier_dropout_prob
__UpperCAmelCase : str = use_labels
__UpperCAmelCase : Optional[int] = is_training
__UpperCAmelCase : str = num_labels
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : Any = scope
def _lowerCamelCase ( self: Optional[int] ) -> int:
__UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : str = None
__UpperCAmelCase : int = None
if self.use_labels:
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__UpperCAmelCase : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self: int , __lowerCamelCase: Any , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: Union[str, Any] ) -> Any:
__UpperCAmelCase : int = MobileNetVaModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Union[str, Any] = model(__lowerCamelCase )
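        # The backbone downsamples by `output_stride` (32 by default here), so the
        # final feature map should be (batch, last_hidden_size, H // 32, W // 32).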
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: List[Any] , __lowerCamelCase: int , __lowerCamelCase: List[Any] , __lowerCamelCase: Union[str, Any] ) -> Tuple:
__UpperCAmelCase : int = self.num_labels
__UpperCAmelCase : Optional[Any] = MobileNetVaForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Optional[int] = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self: Union[str, Any] ) -> str:
__UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = config_and_inputs
__UpperCAmelCase : Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase__: List[Any] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
lowerCamelCase__: Union[str, Any] = (
{"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__: Union[str, Any] = False
lowerCamelCase__: Any = False
lowerCamelCase__: Union[str, Any] = False
lowerCamelCase__: Tuple = False
def _lowerCamelCase ( self: Optional[Any] ) -> str:
__UpperCAmelCase : Optional[int] = MobileNetVaModelTester(self )
__UpperCAmelCase : str = MobileNetVaConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase )
def _lowerCamelCase ( self: List[str] ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV1 does not use inputs_embeds" )
def _lowerCamelCase ( self: List[str] ) -> Optional[int]:
pass
@unittest.skip(reason="MobileNetV1 does not support input and output embeddings" )
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
pass
@unittest.skip(reason="MobileNetV1 does not output attentions" )
def _lowerCamelCase ( self: int ) -> int:
pass
def _lowerCamelCase ( self: Optional[Any] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : int = model_class(__lowerCamelCase )
__UpperCAmelCase : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : List[Any] = [*signature.parameters.keys()]
__UpperCAmelCase : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _lowerCamelCase ( self: List[str] ) -> List[str]:
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowerCamelCase ( self: str ) -> Tuple:
def check_hidden_states_output(__lowerCamelCase: Any , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Tuple ):
__UpperCAmelCase : Dict = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
__UpperCAmelCase : Optional[int] = outputs.hidden_states
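            # MobileNetV1 stacks 13 depthwise-separable blocks, each contributing a
            # depthwise and a pointwise conv output, hence the 13 * 2 = 26 expected here.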
__UpperCAmelCase : Dict = 26
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
__UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : int = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
            # check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : List[Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( self: List[Any] ) -> Union[str, Any]:
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def _lowerCamelCase ( self: Optional[int] ) -> List[Any]:
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : int = MobileNetVaModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def _UpperCamelCase ( ) -> Optional[Any]:
__UpperCAmelCase : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224" ) if is_vision_available() else None
)
@slow
def _lowerCamelCase ( self: int ) -> int:
__UpperCAmelCase : Optional[Any] = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224" ).to(__lowerCamelCase )
__UpperCAmelCase : List[str] = self.default_image_processor
__UpperCAmelCase : Union[str, Any] = prepare_img()
__UpperCAmelCase : List[Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
__UpperCAmelCase : Tuple = model(**__lowerCamelCase )
# verify the logits
__UpperCAmelCase : Dict = torch.Size((1, 10_01) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
__UpperCAmelCase : Tuple = torch.tensor([-4.17_39, -1.12_33, 3.12_05] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
| 342 |
import logging
import os
from .state import PartialState
class _snake_case ( logging.LoggerAdapter ):
@staticmethod
def _lowerCamelCase ( __lowerCamelCase: Any ) -> int:
__UpperCAmelCase : str = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[Any] , *__lowerCamelCase: List[str] , **__lowerCamelCase: List[Any] ) -> Optional[int]:
if PartialState._shared_state == {}:
raise RuntimeError(
"You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." )
__UpperCAmelCase : Any = kwargs.pop("main_process_only" , __lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = kwargs.pop("in_order" , __lowerCamelCase )
if self.isEnabledFor(__lowerCamelCase ):
if self._should_log(__lowerCamelCase ):
__UpperCAmelCase , __UpperCAmelCase : int = self.process(__lowerCamelCase , __lowerCamelCase )
self.logger.log(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
elif in_order:
__UpperCAmelCase : Optional[int] = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self.process(__lowerCamelCase , __lowerCamelCase )
self.logger.log(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
state.wait_for_everyone()
def _UpperCamelCase ( snake_case__, snake_case__ = None ) -> List[str]:
if log_level is None:
__UpperCAmelCase : List[Any] = os.environ.get("ACCELERATE_LOG_LEVEL", snake_case__ )
__UpperCAmelCase : Union[str, Any] = logging.getLogger(snake_case__ )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(snake_case__, {} )
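# Usage sketch for the adapter above (upstream accelerate exposes the factory as
# `get_logger`; message text is illustrative):
#   logger = _UpperCamelCase(__name__, log_level="INFO")
#   logger.info("seen once, from the main process")                  # default behaviour
#   logger.info("seen from every process", main_process_only=False)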
| 342 | 1 |
def _UpperCamelCase ( snake_case__ ) -> bool:
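    """Check whether every character in ``input_str`` is unique.

    A bitmap over Unicode codepoints: bit ``ord(ch)`` is set the first time ``ch``
    is seen, so a repeated character finds its bit already on. The doctests below
    are illustrative examples added for clarity; ``doctest.testmod()`` at the
    bottom of the file will pick them up.

    >>> _UpperCamelCase("abcde")
    True
    >>> _UpperCamelCase("abcda")
    False
    """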
__UpperCAmelCase : Any = 0
for ch in input_str:
__UpperCAmelCase : Optional[Any] = ord(snake_case__ )
__UpperCAmelCase : Union[str, Any] = pow(2, snake_case__ )
    # If the bit for this character's codepoint is already set, we've seen it before
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 342 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _snake_case ( _lowercase ):
def __init__( self: Optional[Any] , __lowerCamelCase: NestedDataStructureLike[PathLike] , __lowerCamelCase: Optional[NamedSplit] = None , __lowerCamelCase: Optional[Features] = None , __lowerCamelCase: str = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: Optional[int] = None , **__lowerCamelCase: Tuple , ) -> str:
super().__init__(
__lowerCamelCase , split=__lowerCamelCase , features=__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase , streaming=__lowerCamelCase , num_proc=__lowerCamelCase , **__lowerCamelCase , )
__UpperCAmelCase : Union[str, Any] = path_or_paths if isinstance(__lowerCamelCase , __lowerCamelCase ) else {self.split: path_or_paths}
__UpperCAmelCase : int = Text(
cache_dir=__lowerCamelCase , data_files=__lowerCamelCase , features=__lowerCamelCase , **__lowerCamelCase , )
def _lowerCamelCase ( self: List[Any] ) -> Optional[Any]:
# Build iterable dataset
if self.streaming:
__UpperCAmelCase : List[str] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__UpperCAmelCase : Any = None
__UpperCAmelCase : Any = None
__UpperCAmelCase : Dict = None
__UpperCAmelCase : str = None
self.builder.download_and_prepare(
download_config=__lowerCamelCase , download_mode=__lowerCamelCase , verification_mode=__lowerCamelCase , base_path=__lowerCamelCase , num_proc=self.num_proc , )
__UpperCAmelCase : Dict = self.builder.as_dataset(
split=self.split , verification_mode=__lowerCamelCase , in_memory=self.keep_in_memory )
return dataset
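# Usage sketch for the reader above ("corpus.txt" is a hypothetical path). The Text
# builder produces a single "text" column with, by default, one example per line:
#   ds = _snake_case("corpus.txt").read()
#   ds_stream = _snake_case("corpus.txt", streaming=True).read()  # IterableDataset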
| 342 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: List[Any] = KandinskyVaaControlnetImgaImgPipeline
lowerCamelCase__: List[Any] = ["image_embeds", "negative_image_embeds", "image", "hint"]
lowerCamelCase__: Optional[Any] = ["image_embeds", "negative_image_embeds", "image", "hint"]
lowerCamelCase__: Any = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
lowerCamelCase__: List[str] = False
@property
def _lowerCamelCase ( self: Tuple ) -> Union[str, Any]:
return 32
@property
def _lowerCamelCase ( self: Tuple ) -> Any:
return 32
@property
def _lowerCamelCase ( self: Tuple ) -> List[Any]:
return self.time_input_dim
@property
def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self: Tuple ) -> Dict:
return 1_00
@property
def _lowerCamelCase ( self: Any ) -> Optional[Any]:
torch.manual_seed(0 )
__UpperCAmelCase : str = {
"in_channels": 8,
            # out_channels is double in_channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
__UpperCAmelCase : str = UNetaDConditionModel(**__lowerCamelCase )
return model
@property
def _lowerCamelCase ( self: Union[str, Any] ) -> Union[str, Any]:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _lowerCamelCase ( self: List[Any] ) -> int:
torch.manual_seed(0 )
__UpperCAmelCase : int = VQModel(**self.dummy_movq_kwargs )
return model
def _lowerCamelCase ( self: List[str] ) -> Any:
__UpperCAmelCase : List[Any] = self.dummy_unet
__UpperCAmelCase : Optional[Any] = self.dummy_movq
__UpperCAmelCase : Optional[Any] = {
"num_train_timesteps": 10_00,
"beta_schedule": "linear",
"beta_start": 0.0_00_85,
"beta_end": 0.0_12,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
__UpperCAmelCase : Tuple = DDIMScheduler(**__lowerCamelCase )
__UpperCAmelCase : List[str] = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Dict , __lowerCamelCase: Optional[int]=0 ) -> Dict:
__UpperCAmelCase : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
__UpperCAmelCase : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__lowerCamelCase )
# create init_image
__UpperCAmelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__UpperCAmelCase : List[str] = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("RGB" ).resize((2_56, 2_56) )
# create hint
__UpperCAmelCase : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
if str(__lowerCamelCase ).startswith("mps" ):
__UpperCAmelCase : Tuple = torch.manual_seed(__lowerCamelCase )
else:
__UpperCAmelCase : str = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
__UpperCAmelCase : Any = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def _lowerCamelCase ( self: List[Any] ) -> Optional[Any]:
__UpperCAmelCase : int = "cpu"
__UpperCAmelCase : Any = self.get_dummy_components()
__UpperCAmelCase : Union[str, Any] = self.pipeline_class(**__lowerCamelCase )
__UpperCAmelCase : Tuple = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
__UpperCAmelCase : List[str] = pipe(**self.get_dummy_inputs(__lowerCamelCase ) )
__UpperCAmelCase : List[str] = output.images
__UpperCAmelCase : List[Any] = pipe(
**self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0]
__UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
__UpperCAmelCase : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__UpperCAmelCase : List[Any] = np.array(
[0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: List[Any] ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self: Union[str, Any] ) -> Dict:
__UpperCAmelCase : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy" )
__UpperCAmelCase : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
__UpperCAmelCase : str = init_image.resize((5_12, 5_12) )
__UpperCAmelCase : Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png" )
__UpperCAmelCase : List[str] = torch.from_numpy(np.array(__lowerCamelCase ) ).float() / 2_55.0
__UpperCAmelCase : str = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
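        # The hint is now a float tensor in [0, 1], permuted from HWC to CHW and given
        # a batch dimension, matching the layout the controlnet pipeline expects.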
__UpperCAmelCase : Dict = "A robot, 4k photo"
__UpperCAmelCase : str = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa )
__UpperCAmelCase : List[str] = pipeline.to(__lowerCamelCase )
pipeline.set_progress_bar_config(disable=__lowerCamelCase )
__UpperCAmelCase : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
__UpperCAmelCase , __UpperCAmelCase : int = pipe_prior(
__lowerCamelCase , image=__lowerCamelCase , strength=0.85 , generator=__lowerCamelCase , negative_prompt="" , ).to_tuple()
__UpperCAmelCase : Dict = pipeline(
image=__lowerCamelCase , image_embeds=__lowerCamelCase , negative_image_embeds=__lowerCamelCase , hint=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=1_00 , height=5_12 , width=5_12 , strength=0.5 , output_type="np" , )
__UpperCAmelCase : Dict = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
| 342 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
'''configuration_trajectory_transformer''': [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TrajectoryTransformerConfig''',
],
}
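# `_LazyModule` (constructed at the bottom of this file) consumes the mapping above,
# deferring the heavy torch imports until one of the listed names is actually accessed.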
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrajectoryTransformerModel''',
'''TrajectoryTransformerPreTrainedModel''',
'''load_tf_weights_in_trajectory_transformer''',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 342 | 1 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class _snake_case ( _lowercase ):
def __init__( self: Dict , __lowerCamelCase: int , __lowerCamelCase: List[Any]=13 , __lowerCamelCase: List[str]=7 , __lowerCamelCase: Tuple=True , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: str=False , __lowerCamelCase: str=True , __lowerCamelCase: Dict=99 , __lowerCamelCase: str=32 , __lowerCamelCase: int=5 , __lowerCamelCase: Optional[int]=4 , __lowerCamelCase: Union[str, Any]=64 , __lowerCamelCase: str="gelu" , __lowerCamelCase: str=0.1 , __lowerCamelCase: Tuple=0.1 , __lowerCamelCase: Optional[int]=5_12 , __lowerCamelCase: str=16 , __lowerCamelCase: List[str]=2 , __lowerCamelCase: Any=0.02 , __lowerCamelCase: Dict=3 , __lowerCamelCase: Dict=4 , __lowerCamelCase: Optional[int]=None , __lowerCamelCase: Optional[Any]=2 , __lowerCamelCase: Union[str, Any]=2 , __lowerCamelCase: int=2 , __lowerCamelCase: int=2 , __lowerCamelCase: Tuple=4 , __lowerCamelCase: List[Any]=1 , ) -> List[Any]:
__UpperCAmelCase : Tuple = parent
__UpperCAmelCase : Optional[int] = batch_size
__UpperCAmelCase : Union[str, Any] = seq_length
__UpperCAmelCase : Optional[int] = is_training
__UpperCAmelCase : Tuple = use_input_mask
__UpperCAmelCase : int = use_token_type_ids
__UpperCAmelCase : Optional[Any] = use_labels
__UpperCAmelCase : List[str] = vocab_size
__UpperCAmelCase : int = hidden_size
__UpperCAmelCase : Optional[Any] = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : Optional[Any] = intermediate_size
__UpperCAmelCase : Any = hidden_act
__UpperCAmelCase : Optional[int] = hidden_dropout_prob
__UpperCAmelCase : List[str] = attention_probs_dropout_prob
__UpperCAmelCase : Optional[int] = max_position_embeddings
__UpperCAmelCase : Any = type_vocab_size
__UpperCAmelCase : int = type_sequence_label_size
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : List[str] = num_labels
__UpperCAmelCase : List[str] = num_choices
__UpperCAmelCase : List[str] = scope
__UpperCAmelCase : Optional[int] = q_groups
__UpperCAmelCase : List[Any] = k_groups
__UpperCAmelCase : int = v_groups
__UpperCAmelCase : Optional[Any] = post_attention_groups
__UpperCAmelCase : int = intermediate_groups
__UpperCAmelCase : Optional[Any] = output_groups
def _lowerCamelCase ( self: Union[str, Any] ) -> List[Any]:
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : int = None
if self.use_input_mask:
__UpperCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : str = None
__UpperCAmelCase : int = None
__UpperCAmelCase : Tuple = None
if self.use_labels:
__UpperCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self: str ) -> Optional[int]:
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def _lowerCamelCase ( self: Any , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: str , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: List[Any] , __lowerCamelCase: Any ) -> int:
__UpperCAmelCase : Optional[int] = SqueezeBertModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Tuple = model(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Dict = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[Any] , __lowerCamelCase: Dict , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Any ) -> Tuple:
__UpperCAmelCase : Optional[Any] = SqueezeBertForMaskedLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: List[Any] , __lowerCamelCase: int , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any ) -> int:
__UpperCAmelCase : List[str] = SqueezeBertForQuestionAnswering(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : int = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[Any] , __lowerCamelCase: List[str] , __lowerCamelCase: Any , __lowerCamelCase: Optional[Any] , __lowerCamelCase: List[str] , __lowerCamelCase: List[Any] ) -> Dict:
__UpperCAmelCase : List[str] = self.num_labels
__UpperCAmelCase : Tuple = SqueezeBertForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : int = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Any , __lowerCamelCase: List[str] , __lowerCamelCase: Union[str, Any] ) -> Any:
__UpperCAmelCase : str = self.num_labels
__UpperCAmelCase : Any = SqueezeBertForTokenClassification(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : List[Any] = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Any , __lowerCamelCase: Any , __lowerCamelCase: int , __lowerCamelCase: Tuple , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[Any] ) -> Dict:
__UpperCAmelCase : Dict = self.num_choices
__UpperCAmelCase : Union[str, Any] = SqueezeBertForMultipleChoice(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
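        # Tile the inputs across choices: (batch, seq_len) -> (batch, num_choices, seq_len).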
__UpperCAmelCase : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : List[Any] = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self: Tuple ) -> Optional[int]:
__UpperCAmelCase : str = self.prepare_config_and_inputs()
((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) : Union[str, Any] = config_and_inputs
__UpperCAmelCase : int = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _snake_case ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase__: Dict = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__: Tuple = (
{
"feature-extraction": SqueezeBertModel,
"fill-mask": SqueezeBertForMaskedLM,
"question-answering": SqueezeBertForQuestionAnswering,
"text-classification": SqueezeBertForSequenceClassification,
"token-classification": SqueezeBertForTokenClassification,
"zero-shot": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__: Any = False
lowerCamelCase__: Optional[int] = True
lowerCamelCase__: Tuple = False
def _lowerCamelCase ( self: Union[str, Any] ) -> int:
__UpperCAmelCase : List[Any] = SqueezeBertModelTester(self )
__UpperCAmelCase : int = ConfigTester(self , config_class=__lowerCamelCase , dim=37 )
def _lowerCamelCase ( self: Tuple ) -> Optional[int]:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self: Optional[Any] ) -> List[Any]:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*__lowerCamelCase )
def _lowerCamelCase ( self: List[str] ) -> Optional[Any]:
__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*__lowerCamelCase )
def _lowerCamelCase ( self: Dict ) -> Dict:
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*__lowerCamelCase )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*__lowerCamelCase )
def _lowerCamelCase ( self: Tuple ) -> List[Any]:
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*__lowerCamelCase )
def _lowerCamelCase ( self: List[str] ) -> str:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*__lowerCamelCase )
@slow
def _lowerCamelCase ( self: Optional[int] ) -> Optional[int]:
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Optional[int] = SqueezeBertModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@require_sentencepiece
@require_tokenizers
@require_torch
class _snake_case ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self: int ) -> str:
__UpperCAmelCase : Dict = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
__UpperCAmelCase : List[Any] = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]] )
__UpperCAmelCase : List[Any] = model(__lowerCamelCase )[0]
__UpperCAmelCase : int = torch.Size((1, 3) )
self.assertEqual(output.shape , __lowerCamelCase )
__UpperCAmelCase : Optional[int] = torch.tensor([[0.64_01, -0.03_49, -0.60_41]] )
self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-4 ) )
| 342 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
def __init__( self: Tuple , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any]=13 , __lowerCamelCase: Optional[int]=32 , __lowerCamelCase: List[str]=3 , __lowerCamelCase: Dict=4 , __lowerCamelCase: Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase: int=[2, 2, 3, 2] , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Tuple=37 , __lowerCamelCase: Tuple="gelu" , __lowerCamelCase: List[Any]=10 , __lowerCamelCase: Optional[int]=0.02 , __lowerCamelCase: Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase: Optional[int]=[2, 3, 4] , __lowerCamelCase: int=None , ) -> List[str]:
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : Optional[int] = image_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : Union[str, Any] = num_stages
__UpperCAmelCase : List[str] = hidden_sizes
__UpperCAmelCase : Any = depths
__UpperCAmelCase : Optional[int] = is_training
__UpperCAmelCase : List[Any] = use_labels
__UpperCAmelCase : Optional[int] = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Union[str, Any] = num_labels
__UpperCAmelCase : Any = initializer_range
__UpperCAmelCase : List[str] = out_features
__UpperCAmelCase : Tuple = out_indices
__UpperCAmelCase : List[Any] = scope
def _lowerCamelCase ( self: List[Any] ) -> Optional[int]:
__UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : List[str] = None
if self.use_labels:
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self: Tuple ) -> List[Any]:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase : Optional[Any] = ConvNextVaModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : List[str] = model(__lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: Tuple ) -> Tuple:
__UpperCAmelCase : Union[str, Any] = ConvNextVaForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Optional[int] = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self: int , __lowerCamelCase: Any , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Any = model(__lowerCamelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__UpperCAmelCase : List[Any] = None
__UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Any = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowerCamelCase ( self: int ) -> List[str]:
__UpperCAmelCase : int = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs
__UpperCAmelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs
__UpperCAmelCase : Dict = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class _snake_case ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase__: Dict = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__: str = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__: Tuple = False
lowerCamelCase__: int = False
lowerCamelCase__: Dict = False
lowerCamelCase__: int = False
lowerCamelCase__: Any = False
def _lowerCamelCase ( self: Union[str, Any] ) -> Union[str, Any]:
__UpperCAmelCase : Union[str, Any] = ConvNextVaModelTester(self )
__UpperCAmelCase : str = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def _lowerCamelCase ( self: Dict ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self: List[Any] ) -> int:
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def _lowerCamelCase ( self: Any ) -> Any:
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def _lowerCamelCase ( self: str ) -> Optional[Any]:
pass
def _lowerCamelCase ( self: List[Any] ) -> int:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_with_labels()
__UpperCAmelCase : Optional[Any] = True
if model_class.__name__ in [
*get_values(__lowerCamelCase ),
*get_values(__lowerCamelCase ),
]:
continue
__UpperCAmelCase : Optional[Any] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
__UpperCAmelCase : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
__UpperCAmelCase : Any = model(**__lowerCamelCase ).loss
loss.backward()
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = True
if (
model_class.__name__
in [*get_values(__lowerCamelCase ), *get_values(__lowerCamelCase )]
or not model_class.supports_gradient_checkpointing
):
continue
__UpperCAmelCase : int = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.gradient_checkpointing_enable()
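            # Gradient checkpointing drops intermediate activations and recomputes them
            # during backward, trading compute for memory; the backward pass must still run.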
model.train()
__UpperCAmelCase : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
__UpperCAmelCase : Any = model(**__lowerCamelCase ).loss
loss.backward()
def _lowerCamelCase ( self: List[str] ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : str = model_class(__lowerCamelCase )
__UpperCAmelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : List[Any] = [*signature.parameters.keys()]
__UpperCAmelCase : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _lowerCamelCase ( self: str ) -> List[Any]:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowerCamelCase ( self: Union[str, Any] ) -> Dict:
def check_hidden_states_output(__lowerCamelCase: Any , __lowerCamelCase: Tuple , __lowerCamelCase: str ):
__UpperCAmelCase : Any = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Tuple = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
__UpperCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[int] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
            # check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Any = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def _lowerCamelCase ( self: Dict ) -> List[Any]:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Optional[int] = ConvNextVaModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def _UpperCamelCase ( ) -> List[Any]:
__UpperCAmelCase : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
__UpperCAmelCase : List[Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCamelCase )
__UpperCAmelCase : List[str] = self.default_image_processor
__UpperCAmelCase : Optional[Any] = prepare_img()
__UpperCAmelCase : int = preprocessor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
__UpperCAmelCase : str = model(**__lowerCamelCase )
# verify the logits
__UpperCAmelCase : Dict = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
__UpperCAmelCase : str = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
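# A minimal sketch (for illustration only, not part of the test suite) of the same
# inference flow, assuming the checkpoint id used above:
# processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
# model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
# with torch.no_grad():
# logits = model(**processor(images=prepare_img(), return_tensors="pt")).logits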
| 342 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
'''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
'''kssteven/ibert-roberta-large-mnli''': (
'''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
),
}
class _snake_case ( _lowercase ):
lowerCamelCase__: int = "ibert"
def __init__( self: Union[str, Any] , __lowerCamelCase: List[str]=3_05_22 , __lowerCamelCase: str=7_68 , __lowerCamelCase: str=12 , __lowerCamelCase: List[str]=12 , __lowerCamelCase: Optional[int]=30_72 , __lowerCamelCase: List[str]="gelu" , __lowerCamelCase: Dict=0.1 , __lowerCamelCase: Union[str, Any]=0.1 , __lowerCamelCase: Optional[Any]=5_12 , __lowerCamelCase: List[Any]=2 , __lowerCamelCase: Tuple=0.02 , __lowerCamelCase: List[str]=1e-12 , __lowerCamelCase: Optional[int]=1 , __lowerCamelCase: int=0 , __lowerCamelCase: Optional[int]=2 , __lowerCamelCase: Tuple="absolute" , __lowerCamelCase: Union[str, Any]=False , __lowerCamelCase: List[str]="none" , **__lowerCamelCase: Dict , ) -> int:
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
__UpperCAmelCase : List[Any] = vocab_size
__UpperCAmelCase : Dict = hidden_size
__UpperCAmelCase : Any = num_hidden_layers
__UpperCAmelCase : int = num_attention_heads
__UpperCAmelCase : Union[str, Any] = hidden_act
__UpperCAmelCase : Union[str, Any] = intermediate_size
__UpperCAmelCase : List[str] = hidden_dropout_prob
__UpperCAmelCase : List[Any] = attention_probs_dropout_prob
__UpperCAmelCase : int = max_position_embeddings
__UpperCAmelCase : List[Any] = type_vocab_size
__UpperCAmelCase : Optional[int] = initializer_range
__UpperCAmelCase : List[Any] = layer_norm_eps
__UpperCAmelCase : Dict = position_embedding_type
__UpperCAmelCase : Dict = quant_mode
__UpperCAmelCase : Tuple = force_dequant
class _snake_case ( _lowercase ):
@property
def _lowerCamelCase ( self: Optional[int] ) -> Mapping[str, Mapping[int, str]]:
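# Batch and sequence axes are dynamic for ONNX export; multiple-choice tasks add a
# dynamic choice axis as well.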
if self.task == "multiple-choice":
__UpperCAmelCase : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
__UpperCAmelCase : Tuple = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 342 | import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _snake_case ( _lowercase ):
lowerCamelCase__: str = "detr"
lowerCamelCase__: Dict = ["past_key_values"]
lowerCamelCase__: str = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self: List[str] , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Any=None , __lowerCamelCase: Dict=3 , __lowerCamelCase: str=1_00 , __lowerCamelCase: Union[str, Any]=6 , __lowerCamelCase: Union[str, Any]=20_48 , __lowerCamelCase: Dict=8 , __lowerCamelCase: Optional[int]=6 , __lowerCamelCase: List[Any]=20_48 , __lowerCamelCase: int=8 , __lowerCamelCase: Tuple=0.0 , __lowerCamelCase: Dict=0.0 , __lowerCamelCase: Any=True , __lowerCamelCase: Tuple="relu" , __lowerCamelCase: Tuple=2_56 , __lowerCamelCase: Dict=0.1 , __lowerCamelCase: Union[str, Any]=0.0 , __lowerCamelCase: Optional[int]=0.0 , __lowerCamelCase: Union[str, Any]=0.02 , __lowerCamelCase: str=1.0 , __lowerCamelCase: List[str]=False , __lowerCamelCase: Dict="sine" , __lowerCamelCase: Optional[int]="resnet50" , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: int=False , __lowerCamelCase: Union[str, Any]=1 , __lowerCamelCase: Tuple=5 , __lowerCamelCase: int=2 , __lowerCamelCase: Dict=1 , __lowerCamelCase: Dict=1 , __lowerCamelCase: Union[str, Any]=5 , __lowerCamelCase: Dict=2 , __lowerCamelCase: int=0.1 , **__lowerCamelCase: str , ) -> int:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__UpperCAmelCase : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : List[Any] = backbone_config.get("model_type" )
__UpperCAmelCase : List[str] = CONFIG_MAPPING[backbone_model_type]
__UpperCAmelCase : List[str] = config_class.from_dict(__lowerCamelCase )
# set timm attributes to None
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = None, None, None
__UpperCAmelCase : Any = use_timm_backbone
__UpperCAmelCase : Optional[Any] = backbone_config
__UpperCAmelCase : Optional[Any] = num_channels
__UpperCAmelCase : List[Any] = num_queries
__UpperCAmelCase : Optional[int] = d_model
__UpperCAmelCase : Optional[Any] = encoder_ffn_dim
__UpperCAmelCase : Dict = encoder_layers
__UpperCAmelCase : List[Any] = encoder_attention_heads
__UpperCAmelCase : int = decoder_ffn_dim
__UpperCAmelCase : Tuple = decoder_layers
__UpperCAmelCase : int = decoder_attention_heads
__UpperCAmelCase : List[Any] = dropout
__UpperCAmelCase : Dict = attention_dropout
__UpperCAmelCase : Optional[Any] = activation_dropout
__UpperCAmelCase : int = activation_function
__UpperCAmelCase : Any = init_std
__UpperCAmelCase : str = init_xavier_std
__UpperCAmelCase : int = encoder_layerdrop
__UpperCAmelCase : Tuple = decoder_layerdrop
__UpperCAmelCase : List[Any] = encoder_layers
__UpperCAmelCase : Optional[Any] = auxiliary_loss
__UpperCAmelCase : int = position_embedding_type
__UpperCAmelCase : Optional[int] = backbone
__UpperCAmelCase : str = use_pretrained_backbone
__UpperCAmelCase : Dict = dilation
# Hungarian matcher
__UpperCAmelCase : Optional[int] = class_cost
__UpperCAmelCase : Optional[Any] = bbox_cost
__UpperCAmelCase : Optional[int] = giou_cost
# Loss coefficients
__UpperCAmelCase : Any = mask_loss_coefficient
__UpperCAmelCase : Any = dice_loss_coefficient
__UpperCAmelCase : Any = bbox_loss_coefficient
__UpperCAmelCase : Optional[int] = giou_loss_coefficient
__UpperCAmelCase : Optional[Any] = eos_coefficient
super().__init__(is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase )
@property
def _lowerCamelCase ( self: Dict ) -> int:
return self.encoder_attention_heads
@property
def _lowerCamelCase ( self: str ) -> int:
return self.d_model
@classmethod
def _lowerCamelCase ( cls: Optional[int] , __lowerCamelCase: PretrainedConfig , **__lowerCamelCase: List[Any] ) -> List[Any]:
return cls(backbone_config=__lowerCamelCase , **__lowerCamelCase )
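# Serialize the config to a plain dict, expanding the nested backbone config as well.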
def _lowerCamelCase ( self: str ) -> Dict[str, any]:
__UpperCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
__UpperCAmelCase : int = self.backbone_config.to_dict()
__UpperCAmelCase : List[str] = self.__class__.model_type
return output
class _snake_case ( _lowercase ):
lowerCamelCase__: Optional[int] = version.parse("1.11" )
@property
def _lowerCamelCase ( self: Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _lowerCamelCase ( self: Optional[Any] ) -> float:
return 1e-5
@property
def _lowerCamelCase ( self: List[str] ) -> int:
return 12
| 342 | 1 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
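# Helpers for converting between the dense 14-atom and sparse 37-atom per-residue
# atom representations used by protein structure models.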
def _UpperCamelCase ( snake_case__ ) -> Dict[str, torch.Tensor]:
__UpperCAmelCase : str = []
__UpperCAmelCase : List[str] = []
__UpperCAmelCase : Tuple = []
for rt in rc.restypes:
__UpperCAmelCase : Tuple = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
__UpperCAmelCase : Optional[Any] = {name: i for i, name in enumerate(snake_case__ )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
__UpperCAmelCase : int = torch.tensor(
snake_case__, dtype=torch.intaa, device=protein["aatype"].device, )
__UpperCAmelCase : str = torch.tensor(
snake_case__, dtype=torch.intaa, device=protein["aatype"].device, )
__UpperCAmelCase : Optional[Any] = torch.tensor(
snake_case__, dtype=torch.floataa, device=protein["aatype"].device, )
__UpperCAmelCase : List[Any] = protein["aatype"].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
__UpperCAmelCase : Tuple = restype_atomaa_to_atomaa[protein_aatype]
__UpperCAmelCase : Union[str, Any] = restype_atomaa_mask[protein_aatype]
__UpperCAmelCase : str = residx_atomaa_mask
__UpperCAmelCase : Optional[Any] = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
__UpperCAmelCase : Optional[Any] = restype_atomaa_to_atomaa[protein_aatype]
__UpperCAmelCase : List[str] = residx_atomaa_to_atomaa.long()
# create the corresponding mask
__UpperCAmelCase : Union[str, Any] = torch.zeros([21, 37], dtype=torch.floataa, device=protein["aatype"].device )
for restype, restype_letter in enumerate(rc.restypes ):
__UpperCAmelCase : Union[str, Any] = rc.restype_atoa[restype_letter]
__UpperCAmelCase : str = rc.residue_atoms[restype_name]
for atom_name in atom_names:
__UpperCAmelCase : Dict = rc.atom_order[atom_name]
__UpperCAmelCase : int = 1
__UpperCAmelCase : Optional[Any] = restype_atomaa_mask[protein_aatype]
__UpperCAmelCase : int = residx_atomaa_mask
return protein
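# NumPy front end: convert arrays to tensors, build the atom masks, then map the
# results back to NumPy.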
def _UpperCamelCase ( snake_case__ ) -> Dict[str, np.ndarray]:
__UpperCAmelCase : Union[str, Any] = tree_map(lambda snake_case__ : torch.tensor(snake_case__, device=batch["aatype"].device ), snake_case__, np.ndarray )
__UpperCAmelCase : Dict = tensor_tree_map(lambda snake_case__ : np.array(snake_case__ ), make_atomaa_masks(snake_case__ ) )
return out
| 342 | from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=1e-1_2 ) -> str:
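# L2-normalize both embedding matrices (with a clipped norm for stability), then
# return their pairwise cosine similarity.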
__UpperCAmelCase : Any = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(snake_case__, axis=1 ), a_min=snake_case__ ) ).T
__UpperCAmelCase : int = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(snake_case__, axis=1 ), a_min=snake_case__ ) ).T
return jnp.matmul(snake_case__, norm_emb_a.T )
class _snake_case ( nn.Module ):
lowerCamelCase__: CLIPConfig
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: Any ) -> Tuple:
__UpperCAmelCase : List[str] = FlaxCLIPVisionModule(self.config.vision_config )
__UpperCAmelCase : Any = nn.Dense(self.config.projection_dim , use_bias=__lowerCamelCase , dtype=self.dtype )
__UpperCAmelCase : int = self.param("concept_embeds" , jax.nn.initializers.ones , (17, self.config.projection_dim) )
__UpperCAmelCase : int = self.param(
"special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim) )
__UpperCAmelCase : Tuple = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (17,) )
__UpperCAmelCase : str = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,) )
def __call__( self: List[Any] , __lowerCamelCase: Dict ) -> Dict:
__UpperCAmelCase : Optional[int] = self.vision_model(__lowerCamelCase )[1]
__UpperCAmelCase : List[str] = self.visual_projection(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = jax_cosine_distance(__lowerCamelCase , self.special_care_embeds )
__UpperCAmelCase : Optional[Any] = jax_cosine_distance(__lowerCamelCase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
__UpperCAmelCase : List[str] = 0.0
__UpperCAmelCase : Tuple = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
__UpperCAmelCase : List[str] = jnp.round(__lowerCamelCase , 3 )
__UpperCAmelCase : Any = jnp.any(special_scores > 0 , axis=1 , keepdims=__lowerCamelCase )
# Use a lower threshold if an image has any special care concept
__UpperCAmelCase : List[Any] = is_special_care * 0.01
__UpperCAmelCase : Any = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
__UpperCAmelCase : List[str] = jnp.round(__lowerCamelCase , 3 )
__UpperCAmelCase : Any = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class _snake_case ( _lowercase ):
lowerCamelCase__: int = CLIPConfig
lowerCamelCase__: Tuple = "clip_input"
lowerCamelCase__: str = FlaxStableDiffusionSafetyCheckerModule
def __init__( self: Union[str, Any] , __lowerCamelCase: CLIPConfig , __lowerCamelCase: Optional[Tuple] = None , __lowerCamelCase: int = 0 , __lowerCamelCase: jnp.dtype = jnp.floataa , __lowerCamelCase: bool = True , **__lowerCamelCase: Optional[int] , ) -> int:
if input_shape is None:
__UpperCAmelCase : Dict = (1, 2_24, 2_24, 3)
__UpperCAmelCase : Tuple = self.module_class(config=__lowerCamelCase , dtype=__lowerCamelCase , **__lowerCamelCase )
super().__init__(__lowerCamelCase , __lowerCamelCase , input_shape=__lowerCamelCase , seed=__lowerCamelCase , dtype=__lowerCamelCase , _do_init=_do_init )
def _lowerCamelCase ( self: Dict , __lowerCamelCase: jax.random.KeyArray , __lowerCamelCase: Tuple , __lowerCamelCase: FrozenDict = None ) -> FrozenDict:
# init input tensor
__UpperCAmelCase : Tuple = jax.random.normal(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase , __UpperCAmelCase : Dict = jax.random.split(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = {"params": params_rng, "dropout": dropout_rng}
__UpperCAmelCase : str = self.module.init(__lowerCamelCase , __lowerCamelCase )["params"]
return random_params
def __call__( self: Union[str, Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: dict = None , ) -> List[Any]:
__UpperCAmelCase : int = jnp.transpose(__lowerCamelCase , (0, 2, 3, 1) )
return self.module.apply(
{"params": params or self.params} , jnp.array(__lowerCamelCase , dtype=jnp.floataa ) , rngs={} , )
| 342 | 1 |
class _snake_case :
def __init__( self: Union[str, Any] , __lowerCamelCase: str = "" , __lowerCamelCase: bool = False ) -> None:
# Mapping from the first character of the prefix of the node
__UpperCAmelCase : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
__UpperCAmelCase : Union[str, Any] = is_leaf
__UpperCAmelCase : List[str] = prefix
def _lowerCamelCase ( self: int , __lowerCamelCase: str ) -> tuple[str, str, str]:
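# Split into (common prefix, remainder of the node prefix, remainder of the word).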
__UpperCAmelCase : Optional[int] = 0
for q, w in zip(self.prefix , __lowerCamelCase ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: list[str] ) -> None:
for word in words:
self.insert(__lowerCamelCase )
def _lowerCamelCase ( self: int , __lowerCamelCase: str ) -> None:
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
__UpperCAmelCase : Union[str, Any] = True
# Case 2: The node has no edge starting with the word's first character
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
__UpperCAmelCase : Any = RadixNode(prefix=__lowerCamelCase , is_leaf=__lowerCamelCase )
else:
__UpperCAmelCase : Tuple = self.nodes[word[0]]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = incoming_node.match(
__lowerCamelCase )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(__lowerCamelCase )
# Case 4: The match consumes only part of the node prefix
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
__UpperCAmelCase : Tuple = remaining_prefix
__UpperCAmelCase : Any = self.nodes[matching_string[0]]
__UpperCAmelCase : Optional[int] = RadixNode(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Optional[Any] = aux_node
if remaining_word == "":
__UpperCAmelCase : Optional[Any] = True
else:
self.nodes[matching_string[0]].insert(__lowerCamelCase )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: str ) -> bool:
__UpperCAmelCase : List[str] = self.nodes.get(word[0] , __lowerCamelCase )
if not incoming_node:
return False
else:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = incoming_node.match(
__lowerCamelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(__lowerCamelCase )
def _lowerCamelCase ( self: str , __lowerCamelCase: str ) -> bool:
__UpperCAmelCase : Optional[Any] = self.nodes.get(word[0] , __lowerCamelCase )
if not incoming_node:
return False
else:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = incoming_node.match(
__lowerCamelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(__lowerCamelCase )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
__UpperCAmelCase : Optional[int] = list(self.nodes.values() )[0]
__UpperCAmelCase : Optional[int] = merging_node.is_leaf
self.prefix += merging_node.prefix
__UpperCAmelCase : List[str] = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
__UpperCAmelCase : Optional[int] = False
# If there is 1 edge, we merge it with its child
else:
__UpperCAmelCase : Union[str, Any] = list(incoming_node.nodes.values() )[0]
__UpperCAmelCase : Optional[Any] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
__UpperCAmelCase : Optional[Any] = merging_node.nodes
return True
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: int = 0 ) -> None:
if self.prefix != "":
print("-" * height , self.prefix , " (leaf)" if self.is_leaf else "" )
for value in self.nodes.values():
value.print_tree(height + 1 )
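# Self-test: build a small radix tree, then check membership before and after deletions.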
def _UpperCamelCase ( ) -> bool:
__UpperCAmelCase : int = "banana bananas bandana band apple all beast".split()
__UpperCAmelCase : Tuple = RadixNode()
root.insert_many(snake_case__ )
assert all(root.find(snake_case__ ) for word in words )
assert not root.find("bandanas" )
assert not root.find("apps" )
root.delete("all" )
assert not root.find("all" )
root.delete("banana" )
assert not root.find("banana" )
assert root.find("bananas" )
return True
def _UpperCamelCase ( ) -> None:
assert test_trie()
def _UpperCamelCase ( ) -> None:
__UpperCAmelCase : List[str] = RadixNode()
__UpperCAmelCase : List[str] = "banana bananas bandanas bandana band apple all beast".split()
root.insert_many(snake_case__ )
print("Words:", snake_case__ )
print("Tree:" )
root.print_tree()
if __name__ == "__main__":
main()
| 342 | import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def _UpperCamelCase ( snake_case__ ) -> Tuple:
__UpperCAmelCase : Union[str, Any] = 384
if "tiny" in model_name:
__UpperCAmelCase : Union[str, Any] = [3, 3, 9, 3]
__UpperCAmelCase : List[Any] = [96, 192, 384, 768]
if "small" in model_name:
__UpperCAmelCase : Tuple = [3, 3, 27, 3]
__UpperCAmelCase : Any = [96, 192, 384, 768]
if "base" in model_name:
__UpperCAmelCase : str = [3, 3, 27, 3]
__UpperCAmelCase : str = [128, 256, 512, 1024]
__UpperCAmelCase : str = 512
if "large" in model_name:
__UpperCAmelCase : Dict = [3, 3, 27, 3]
__UpperCAmelCase : int = [192, 384, 768, 1536]
__UpperCAmelCase : Dict = 768
if "xlarge" in model_name:
__UpperCAmelCase : List[Any] = [3, 3, 27, 3]
__UpperCAmelCase : Tuple = [256, 512, 1024, 2048]
__UpperCAmelCase : int = 1024
# set label information
__UpperCAmelCase : List[Any] = 150
__UpperCAmelCase : str = "huggingface/label-files"
__UpperCAmelCase : List[Any] = "ade20k-id2label.json"
__UpperCAmelCase : str = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) )
__UpperCAmelCase : str = {int(snake_case__ ): v for k, v in idalabel.items()}
__UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()}
__UpperCAmelCase : int = ConvNextConfig(
depths=snake_case__, hidden_sizes=snake_case__, out_features=["stage1", "stage2", "stage3", "stage4"] )
__UpperCAmelCase : int = UperNetConfig(
backbone_config=snake_case__, auxiliary_in_channels=snake_case__, num_labels=snake_case__, idalabel=snake_case__, labelaid=snake_case__, )
return config
def _UpperCamelCase ( snake_case__ ) -> Tuple:
__UpperCAmelCase : Optional[int] = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
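# Pop the tensor stored under the source key and re-insert it under the destination key.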
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Any:
__UpperCAmelCase : Union[str, Any] = dct.pop(snake_case__ )
__UpperCAmelCase : Optional[int] = val
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Union[str, Any]:
__UpperCAmelCase : Dict = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
__UpperCAmelCase : Union[str, Any] = model_name_to_url[model_name]
__UpperCAmelCase : str = torch.hub.load_state_dict_from_url(snake_case__, map_location="cpu" )["state_dict"]
__UpperCAmelCase : Dict = get_upernet_config(snake_case__ )
__UpperCAmelCase : str = UperNetForSemanticSegmentation(snake_case__ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
__UpperCAmelCase : str = state_dict.pop(snake_case__ )
if "bn" in key:
__UpperCAmelCase : int = key.replace("bn", "batch_norm" )
__UpperCAmelCase : Union[str, Any] = val
# rename keys
__UpperCAmelCase : Optional[Any] = create_rename_keys(snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__, snake_case__, snake_case__ )
model.load_state_dict(snake_case__ )
# verify on image
__UpperCAmelCase : int = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
__UpperCAmelCase : Optional[int] = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw ).convert("RGB" )
__UpperCAmelCase : str = SegformerImageProcessor()
__UpperCAmelCase : Any = processor(snake_case__, return_tensors="pt" ).pixel_values
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(snake_case__ )
if model_name == "upernet-convnext-tiny":
__UpperCAmelCase : Any = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
__UpperCAmelCase : Optional[Any] = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
__UpperCAmelCase : Dict = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
__UpperCAmelCase : Tuple = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
__UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print("Logits:", outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3], snake_case__, atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case__ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(snake_case__ )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
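# Example invocation (script filename and paths below are illustrative placeholders):
# python convert_upernet_checkpoint.py --model_name upernet-convnext-tiny \
# --pytorch_dump_folder_path ./upernet-convnext-tiny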
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[F'upernet-convnext-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_snake_case = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 342 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: Tuple = DiTPipeline
lowerCamelCase__: Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
lowerCamelCase__: Tuple = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
lowerCamelCase__: Any = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
lowerCamelCase__: Dict = False
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
torch.manual_seed(0 )
__UpperCAmelCase : Union[str, Any] = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=__lowerCamelCase , activation_fn="gelu-approximate" , num_embeds_ada_norm=10_00 , norm_type="ada_norm_zero" , norm_elementwise_affine=__lowerCamelCase , )
__UpperCAmelCase : Optional[int] = AutoencoderKL()
__UpperCAmelCase : str = DDIMScheduler()
__UpperCAmelCase : Union[str, Any] = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
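# Deterministic dummy inputs: a seeded generator, a single class label, and two
# inference steps to keep the test fast.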
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: str , __lowerCamelCase: List[Any]=0 ) -> Optional[Any]:
if str(__lowerCamelCase ).startswith("mps" ):
__UpperCAmelCase : Optional[int] = torch.manual_seed(__lowerCamelCase )
else:
__UpperCAmelCase : Optional[int] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
__UpperCAmelCase : List[str] = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _lowerCamelCase ( self: Any ) -> Tuple:
__UpperCAmelCase : List[str] = "cpu"
__UpperCAmelCase : Union[str, Any] = self.get_dummy_components()
__UpperCAmelCase : Tuple = self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
__UpperCAmelCase : int = self.get_dummy_inputs(__lowerCamelCase )
__UpperCAmelCase : List[str] = pipe(**__lowerCamelCase ).images
__UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
__UpperCAmelCase : Any = np.array([0.29_46, 0.66_01, 0.43_29, 0.32_96, 0.41_44, 0.53_19, 0.72_73, 0.50_13, 0.44_57] )
__UpperCAmelCase : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCamelCase , 1e-3 )
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
self._test_inference_batch_single_identical(relax_max_difference=__lowerCamelCase , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _lowerCamelCase ( self: List[str] ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: int ) -> Tuple:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self: Any ) -> List[str]:
__UpperCAmelCase : Tuple = torch.manual_seed(0 )
__UpperCAmelCase : Dict = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
__UpperCAmelCase : Dict = ["vase", "umbrella", "white shark", "white wolf"]
__UpperCAmelCase : Any = pipe.get_label_ids(__lowerCamelCase )
__UpperCAmelCase : Tuple = pipe(__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=40 , output_type="np" ).images
for word, image in zip(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Any = load_numpy(
f'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-2
def _lowerCamelCase ( self: Union[str, Any] ) -> List[Any]:
__UpperCAmelCase : Tuple = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
__UpperCAmelCase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
__UpperCAmelCase : Optional[Any] = ["vase", "umbrella"]
__UpperCAmelCase : Optional[Any] = pipe.get_label_ids(__lowerCamelCase )
__UpperCAmelCase : List[str] = torch.manual_seed(0 )
__UpperCAmelCase : List[str] = pipe(__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=25 , output_type="np" ).images
for word, image in zip(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
f'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-1
| 342 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class _snake_case ( _lowercase ):
lowerCamelCase__: Dict = "roc_bert"
def __init__( self: int , __lowerCamelCase: Union[str, Any]=3_05_22 , __lowerCamelCase: int=7_68 , __lowerCamelCase: Any=12 , __lowerCamelCase: int=12 , __lowerCamelCase: Union[str, Any]=30_72 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: Optional[int]=0.1 , __lowerCamelCase: str=0.1 , __lowerCamelCase: Any=5_12 , __lowerCamelCase: Union[str, Any]=2 , __lowerCamelCase: str=0.02 , __lowerCamelCase: int=1e-12 , __lowerCamelCase: str=True , __lowerCamelCase: int=0 , __lowerCamelCase: List[str]="absolute" , __lowerCamelCase: List[Any]=None , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: Dict=7_68 , __lowerCamelCase: Optional[int]=9_10 , __lowerCamelCase: Union[str, Any]=5_12 , __lowerCamelCase: int=2_48_58 , __lowerCamelCase: Optional[int]=True , **__lowerCamelCase: Any , ) -> List[Any]:
__UpperCAmelCase : str = vocab_size
__UpperCAmelCase : Dict = max_position_embeddings
__UpperCAmelCase : Optional[Any] = hidden_size
__UpperCAmelCase : Optional[int] = num_hidden_layers
__UpperCAmelCase : Union[str, Any] = num_attention_heads
__UpperCAmelCase : List[str] = intermediate_size
__UpperCAmelCase : List[Any] = hidden_act
__UpperCAmelCase : List[str] = hidden_dropout_prob
__UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : Optional[Any] = type_vocab_size
__UpperCAmelCase : List[Any] = layer_norm_eps
__UpperCAmelCase : Optional[int] = use_cache
__UpperCAmelCase : Optional[Any] = enable_pronunciation
__UpperCAmelCase : Any = enable_shape
__UpperCAmelCase : Union[str, Any] = pronunciation_embed_dim
__UpperCAmelCase : Optional[Any] = pronunciation_vocab_size
__UpperCAmelCase : Optional[Any] = shape_embed_dim
__UpperCAmelCase : List[Any] = shape_vocab_size
__UpperCAmelCase : int = concat_input
__UpperCAmelCase : int = position_embedding_type
__UpperCAmelCase : Optional[int] = classifier_dropout
super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase )
| 342 | 1 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 342 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def _UpperCamelCase ( snake_case__ ) -> int:
__UpperCAmelCase : int = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
__UpperCAmelCase : int = [144, 192, 240]
__UpperCAmelCase : Optional[Any] = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
__UpperCAmelCase : Optional[Any] = [96, 120, 144]
__UpperCAmelCase : Tuple = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
__UpperCAmelCase : str = [64, 80, 96]
__UpperCAmelCase : Optional[Any] = [16, 16, 24, 48, 64, 80, 320]
__UpperCAmelCase : Tuple = 0.05
__UpperCAmelCase : Dict = 2.0
if mobilevit_name.startswith("deeplabv3_" ):
__UpperCAmelCase : str = 512
__UpperCAmelCase : Any = 16
__UpperCAmelCase : str = 21
__UpperCAmelCase : Union[str, Any] = "pascal-voc-id2label.json"
else:
__UpperCAmelCase : Optional[Any] = 1000
__UpperCAmelCase : int = "imagenet-1k-id2label.json"
__UpperCAmelCase : Dict = "huggingface/label-files"
__UpperCAmelCase : int = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) )
__UpperCAmelCase : Any = {int(snake_case__ ): v for k, v in idalabel.items()}
__UpperCAmelCase : int = idalabel
__UpperCAmelCase : List[str] = {v: k for k, v in idalabel.items()}
return config
def _UpperCamelCase ( snake_case__, snake_case__=False ) -> Tuple:
for i in range(1, 6 ):
if f'''layer_{i}.''' in name:
__UpperCAmelCase : Tuple = name.replace(f'''layer_{i}.''', f'''encoder.layer.{i - 1}.''' )
if "conv_1." in name:
__UpperCAmelCase : Dict = name.replace("conv_1.", "conv_stem." )
if ".block." in name:
__UpperCAmelCase : Optional[int] = name.replace(".block.", "." )
if "exp_1x1" in name:
__UpperCAmelCase : Tuple = name.replace("exp_1x1", "expand_1x1" )
if "red_1x1" in name:
__UpperCAmelCase : Optional[Any] = name.replace("red_1x1", "reduce_1x1" )
if ".local_rep.conv_3x3." in name:
__UpperCAmelCase : Optional[int] = name.replace(".local_rep.conv_3x3.", ".conv_kxk." )
if ".local_rep.conv_1x1." in name:
__UpperCAmelCase : Any = name.replace(".local_rep.conv_1x1.", ".conv_1x1." )
if ".norm." in name:
__UpperCAmelCase : Dict = name.replace(".norm.", ".normalization." )
if ".conv." in name:
__UpperCAmelCase : List[Any] = name.replace(".conv.", ".convolution." )
if ".conv_proj." in name:
__UpperCAmelCase : List[str] = name.replace(".conv_proj.", ".conv_projection." )
for i in range(0, 2 ):
for j in range(0, 4 ):
if f'''.{i}.{j}.''' in name:
__UpperCAmelCase : List[Any] = name.replace(f'''.{i}.{j}.''', f'''.{i}.layer.{j}.''' )
for i in range(2, 6 ):
for j in range(0, 4 ):
if f'''.{i}.{j}.''' in name:
__UpperCAmelCase : Any = name.replace(f'''.{i}.{j}.''', f'''.{i}.''' )
if "expand_1x1" in name:
__UpperCAmelCase : Optional[int] = name.replace("expand_1x1", "downsampling_layer.expand_1x1" )
if "conv_3x3" in name:
__UpperCAmelCase : List[Any] = name.replace("conv_3x3", "downsampling_layer.conv_3x3" )
if "reduce_1x1" in name:
__UpperCAmelCase : Dict = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1" )
for i in range(2, 5 ):
if f'''.global_rep.{i}.weight''' in name:
__UpperCAmelCase : Any = name.replace(f'''.global_rep.{i}.weight''', ".layernorm.weight" )
if f'''.global_rep.{i}.bias''' in name:
__UpperCAmelCase : Optional[Any] = name.replace(f'''.global_rep.{i}.bias''', ".layernorm.bias" )
if ".global_rep." in name:
__UpperCAmelCase : Tuple = name.replace(".global_rep.", ".transformer." )
if ".pre_norm_mha.0." in name:
__UpperCAmelCase : Optional[Any] = name.replace(".pre_norm_mha.0.", ".layernorm_before." )
if ".pre_norm_mha.1.out_proj." in name:
__UpperCAmelCase : Tuple = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense." )
if ".pre_norm_ffn.0." in name:
__UpperCAmelCase : Optional[Any] = name.replace(".pre_norm_ffn.0.", ".layernorm_after." )
if ".pre_norm_ffn.1." in name:
__UpperCAmelCase : Dict = name.replace(".pre_norm_ffn.1.", ".intermediate.dense." )
if ".pre_norm_ffn.4." in name:
__UpperCAmelCase : int = name.replace(".pre_norm_ffn.4.", ".output.dense." )
if ".transformer." in name:
__UpperCAmelCase : Tuple = name.replace(".transformer.", ".transformer.layer." )
if ".aspp_layer." in name:
__UpperCAmelCase : Any = name.replace(".aspp_layer.", "." )
if ".aspp_pool." in name:
__UpperCAmelCase : Optional[Any] = name.replace(".aspp_pool.", "." )
if "seg_head." in name:
__UpperCAmelCase : Optional[int] = name.replace("seg_head.", "segmentation_head." )
if "segmentation_head.classifier.classifier." in name:
__UpperCAmelCase : str = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier." )
if "classifier.fc." in name:
__UpperCAmelCase : Optional[Any] = name.replace("classifier.fc.", "classifier." )
elif (not base_model) and ("segmentation_head." not in name):
__UpperCAmelCase : List[str] = "mobilevit." + name
return name
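# The original checkpoints store attention as a fused qkv projection; the function
# below splits it into separate query/key/value tensors while renaming the other keys.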
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=False ) -> Union[str, Any]:
if base_model:
__UpperCAmelCase : Optional[int] = ""
else:
__UpperCAmelCase : Tuple = "mobilevit."
for key in orig_state_dict.copy().keys():
__UpperCAmelCase : Optional[int] = orig_state_dict.pop(snake_case__ )
if key[:8] == "encoder.":
__UpperCAmelCase : str = key[8:]
if "qkv" in key:
__UpperCAmelCase : Tuple = key.split("." )
__UpperCAmelCase : List[Any] = int(key_split[0][6:] ) - 1
__UpperCAmelCase : Optional[Any] = int(key_split[3] )
__UpperCAmelCase : Tuple = model.get_submodule(f'''{model_prefix}encoder.layer.{layer_num}''' )
__UpperCAmelCase : List[str] = layer.transformer.layer[transformer_num].attention.attention.all_head_size
__UpperCAmelCase : Optional[Any] = (
f'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'''
)
if "weight" in key:
__UpperCAmelCase : Any = val[:dim, :]
__UpperCAmelCase : Any = val[dim : dim * 2, :]
__UpperCAmelCase : List[Any] = val[-dim:, :]
else:
__UpperCAmelCase : List[str] = val[:dim]
__UpperCAmelCase : Optional[Any] = val[dim : dim * 2]
__UpperCAmelCase : List[Any] = val[-dim:]
else:
__UpperCAmelCase : str = val
return orig_state_dict
def _UpperCamelCase ( ) -> Any:
__UpperCAmelCase : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
__UpperCAmelCase : List[str] = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw )
return im
@torch.no_grad()
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__=False ) -> Optional[Any]:
__UpperCAmelCase : Tuple = get_mobilevit_config(snake_case__ )
# load original state_dict
__UpperCAmelCase : str = torch.load(snake_case__, map_location="cpu" )
# load 🤗 model
if mobilevit_name.startswith("deeplabv3_" ):
__UpperCAmelCase : Optional[int] = MobileViTForSemanticSegmentation(snake_case__ ).eval()
else:
__UpperCAmelCase : List[Any] = MobileViTForImageClassification(snake_case__ ).eval()
__UpperCAmelCase : Dict = convert_state_dict(snake_case__, snake_case__ )
model.load_state_dict(snake_case__ )
# Check outputs on an image, prepared by MobileViTImageProcessor
__UpperCAmelCase : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32 )
__UpperCAmelCase : Any = image_processor(images=prepare_img(), return_tensors="pt" )
__UpperCAmelCase : Dict = model(**snake_case__ )
__UpperCAmelCase : Tuple = outputs.logits
if mobilevit_name.startswith("deeplabv3_" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
__UpperCAmelCase : int = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
__UpperCAmelCase : Tuple = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
__UpperCAmelCase : Any = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
] )
else:
raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3, :3, :3], snake_case__, atol=1e-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
__UpperCAmelCase : str = torch.tensor([-0.9866, 0.2392, -1.1241] )
elif mobilevit_name == "mobilevit_xs":
__UpperCAmelCase : Tuple = torch.tensor([-2.4761, -0.9399, -1.9587] )
elif mobilevit_name == "mobilevit_xxs":
__UpperCAmelCase : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653] )
else:
raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3], snake_case__, atol=1e-4 )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(f'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case__ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(snake_case__ )
if push_to_hub:
__UpperCAmelCase : List[str] = {
"mobilevit_s": "mobilevit-small",
"mobilevit_xs": "mobilevit-x-small",
"mobilevit_xxs": "mobilevit-xx-small",
"deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
"deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
"deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
}
print("Pushing to the hub..." )
__UpperCAmelCase : int = model_mapping[mobilevit_name]
image_processor.push_to_hub(snake_case__, organization="apple" )
model.push_to_hub(snake_case__, organization="apple" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_snake_case = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 342 | 1 |
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def _UpperCamelCase ( ) -> Optional[int]:
__UpperCAmelCase : Union[str, Any] = torch.nn.Linear(2, 4 )
__UpperCAmelCase : Dict = torch.optim.AdamW(model.parameters(), lr=1.0 )
__UpperCAmelCase : Optional[Any] = torch.optim.lr_scheduler.OneCycleLR(snake_case__, max_lr=0.01, steps_per_epoch=2, epochs=1 )
__UpperCAmelCase : Any = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
__UpperCAmelCase : Tuple = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
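# Scalar fingerprint of the model parameters; it changes whenever the weights change.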
def _UpperCamelCase ( snake_case__ ) -> Optional[int]:
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def _UpperCamelCase ( snake_case__ ) -> Union[str, Any]:
__UpperCAmelCase : Optional[int] = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(snake_case__ )
class _snake_case ( _lowercase ):
@require_cuda
def _lowerCamelCase ( self: str ) -> List[Any]:
__UpperCAmelCase : str = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(__lowerCamelCase ):
__UpperCAmelCase : Tuple = Accelerator(cpu=__lowerCamelCase )
def _lowerCamelCase ( self: Optional[int] ) -> Optional[Any]:
__UpperCAmelCase : Dict = Accelerator()
__UpperCAmelCase : Optional[Any] = GradientState()
assert state.num_steps == 1
__UpperCAmelCase : Optional[Any] = 4
assert state.num_steps == 4
assert state.sync_gradients is True
__UpperCAmelCase : str = False
assert state.sync_gradients is False
GradientState._reset_state()
def _lowerCamelCase ( self: str ) -> Dict:
__UpperCAmelCase : Union[str, Any] = Accelerator()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[str] = create_components()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = accelerator.prepare(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def _lowerCamelCase ( self: str ) -> Any:
__UpperCAmelCase : List[str] = Accelerator()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = create_components()
accelerator.prepare(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def _lowerCamelCase ( self: Optional[Any] ) -> Any:
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*__lowerCamelCase: Optional[Any] , **__lowerCamelCase: Any ):
pass
with patch("torch.cuda.set_device" , __lowerCamelCase ), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64" ):
__UpperCAmelCase : Any = Accelerator()
self.assertEqual(str(accelerator.state.device ) , "cuda:64" )
def _lowerCamelCase ( self: Tuple ) -> List[Any]:
__UpperCAmelCase : str = Accelerator()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = create_components()
accelerator.prepare(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Optional[Any] = get_signature(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__lowerCamelCase )
# make sure random weights don't match
load_random_weights(__lowerCamelCase )
self.assertTrue(abs(model_signature - get_signature(__lowerCamelCase ) ) > 1e-3 )
# make sure loaded weights match
accelerator.load_state(__lowerCamelCase )
self.assertTrue(abs(model_signature - get_signature(__lowerCamelCase ) ) < 1e-3 )
def _lowerCamelCase ( self: Dict ) -> Dict:
__UpperCAmelCase : List[str] = Accelerator()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = create_components()
accelerator.prepare(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Tuple = get_signature(__lowerCamelCase )
# saving hook
def save_config(__lowerCamelCase: Any , __lowerCamelCase: Any , __lowerCamelCase: Optional[int] ):
__UpperCAmelCase : List[Any] = {"class_name": models[0].__class__.__name__}
with open(os.path.join(__lowerCamelCase , "data.json" ) , "w" ) as f:
json.dump(__lowerCamelCase , __lowerCamelCase )
# loading hook
def load_config(__lowerCamelCase: int , __lowerCamelCase: Dict ):
with open(os.path.join(__lowerCamelCase , "data.json" ) , "r" ) as f:
__UpperCAmelCase : Optional[Any] = json.load(__lowerCamelCase )
__UpperCAmelCase : str = config["class_name"]
__UpperCAmelCase : Tuple = accelerator.register_save_state_pre_hook(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = accelerator.register_load_state_pre_hook(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__lowerCamelCase )
# make sure random weights don't match with hooks
load_random_weights(__lowerCamelCase )
self.assertTrue(abs(model_signature - get_signature(__lowerCamelCase ) ) > 1e-3 )
# random class name to verify correct one is loaded
__UpperCAmelCase : List[Any] = "random"
# make sure loaded weights match with hooks
accelerator.load_state(__lowerCamelCase )
self.assertTrue(abs(model_signature - get_signature(__lowerCamelCase ) ) < 1e-3 )
# model.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__lowerCamelCase )
# make sure random weights don't match with hooks removed
load_random_weights(__lowerCamelCase )
self.assertTrue(abs(model_signature - get_signature(__lowerCamelCase ) ) > 1e-3 )
# random class name to verify correct one is loaded
__UpperCAmelCase : List[str] = "random"
# make sure loaded weights match with hooks removed
accelerator.load_state(__lowerCamelCase )
self.assertTrue(abs(model_signature - get_signature(__lowerCamelCase ) ) < 1e-3 )
        # model.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def _lowerCamelCase ( self: int ) -> List[Any]:
__UpperCAmelCase : int = Accelerator()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = create_components()
__UpperCAmelCase : str = None
# This should work
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = accelerator.prepare(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
self.assertTrue(dummy_obj is None )
def _lowerCamelCase ( self: Optional[Any] ) -> Union[str, Any]:
__UpperCAmelCase : Tuple = Accelerator()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = create_components()
__UpperCAmelCase : List[str] = [1, 2, 3]
# This should work
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[str] = accelerator.prepare(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
self.assertEqual(
getattr(__lowerCamelCase , "_is_accelerate_prepared" , __lowerCamelCase ) , __lowerCamelCase , "Dummy object should have `_is_accelerate_prepared` set to `True`" , )
        self.assertEqual(
            getattr(__lowerCamelCase , "_is_accelerate_prepared" , __lowerCamelCase ) , __lowerCamelCase , "Model is missing `_is_accelerate_prepared` or is set to `False`" , )
        self.assertEqual(
            getattr(__lowerCamelCase , "_is_accelerate_prepared" , __lowerCamelCase ) , __lowerCamelCase , "Optimizer is missing `_is_accelerate_prepared` or is set to `False`" , )
        self.assertEqual(
            getattr(__lowerCamelCase , "_is_accelerate_prepared" , __lowerCamelCase ) , __lowerCamelCase , "Scheduler is missing `_is_accelerate_prepared` or is set to `False`" , )
        self.assertEqual(
            getattr(__lowerCamelCase , "_is_accelerate_prepared" , __lowerCamelCase ) , __lowerCamelCase , "Train Dataloader is missing `_is_accelerate_prepared` or is set to `False`" , )
        self.assertEqual(
            getattr(__lowerCamelCase , "_is_accelerate_prepared" , __lowerCamelCase ) , __lowerCamelCase , "Valid Dataloader is missing `_is_accelerate_prepared` or is set to `False`" , )
@slow
@require_bnb
def _lowerCamelCase ( self: List[Any] ) -> Optional[Any]:
from transformers import AutoModelForCausalLM
__UpperCAmelCase : str = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=__lowerCamelCase , device_map={"": 0} , )
__UpperCAmelCase : Dict = Accelerator()
# This should work
__UpperCAmelCase : str = accelerator.prepare(__lowerCamelCase )
@slow
@require_bnb
def _lowerCamelCase ( self: List[Any] ) -> Union[str, Any]:
from transformers import AutoModelForCausalLM
__UpperCAmelCase : List[str] = Accelerator()
with init_empty_weights():
__UpperCAmelCase : List[str] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
__UpperCAmelCase : Optional[Any] = infer_auto_device_map(__lowerCamelCase )
__UpperCAmelCase : Any = "cpu"
__UpperCAmelCase : Tuple = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , device_map=__lowerCamelCase , load_in_abit=__lowerCamelCase , llm_inta_enable_fpaa_cpu_offload=__lowerCamelCase )
# This should not work and get value error
        with self.assertRaises(ValueError ):
__UpperCAmelCase : List[str] = accelerator.prepare(__lowerCamelCase )
@slow
@require_bnb
@require_multi_gpu
def _lowerCamelCase ( self: Dict ) -> List[str]:
from transformers import AutoModelForCausalLM
__UpperCAmelCase : Union[str, Any] = {"distributed_type": DistributedType.MULTI_GPU}
with init_empty_weights():
__UpperCAmelCase : int = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
__UpperCAmelCase : Any = infer_auto_device_map(__lowerCamelCase )
__UpperCAmelCase : List[str] = 1
__UpperCAmelCase : str = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=__lowerCamelCase , device_map=__lowerCamelCase , )
__UpperCAmelCase : Any = Accelerator()
# This should not work and get value error
        with self.assertRaises(ValueError ):
__UpperCAmelCase : Any = accelerator.prepare(__lowerCamelCase )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def _lowerCamelCase ( self: Optional[int] ) -> int:
from transformers import AutoModelForCausalLM
with init_empty_weights():
__UpperCAmelCase : Dict = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
__UpperCAmelCase : int = infer_auto_device_map(__lowerCamelCase )
__UpperCAmelCase : Dict = 1
__UpperCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=__lowerCamelCase , device_map=__lowerCamelCase , )
__UpperCAmelCase : Any = Accelerator()
# This should work
__UpperCAmelCase : int = accelerator.prepare(__lowerCamelCase )
@require_cuda
def _lowerCamelCase ( self: Optional[int] ) -> Any:
__UpperCAmelCase : int = torch.nn.Linear(10 , 10 )
__UpperCAmelCase : int = torch.optim.SGD(model.parameters() , lr=0.01 )
__UpperCAmelCase : List[str] = Accelerator(cpu=__lowerCamelCase )
__UpperCAmelCase : int = accelerator.prepare(__lowerCamelCase )
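# The save/load hook pattern exercised above, in plain form (a minimal
# sketch using accelerate's public hook API; the "meta.json" side-car file
# name is just an illustration):
def _state_hook_demo() -> None:
    import json
    import os
    import tempfile
    accelerator = Accelerator()
    def save_meta(models, weights, output_dir):
        # runs inside accelerator.save_state(), before the weights are written
        with open(os.path.join(output_dir, "meta.json"), "w") as f:
            json.dump({"num_models": len(models)}, f)
    handle = accelerator.register_save_state_pre_hook(save_meta)
    with tempfile.TemporaryDirectory() as tmpdirname:
        accelerator.save_state(tmpdirname)
    handle.remove()  # detach the hook again, as the tests above do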
| 342 |
import math
_snake_case = 10
_snake_case = 7
_snake_case = BALLS_PER_COLOUR * NUM_COLOURS
def _UpperCamelCase ( snake_case__ = 20 ) -> str:
    __UpperCAmelCase : Optional[Any] = math.comb(NUM_BALLS, snake_case__ )
__UpperCAmelCase : List[Any] = math.comb(NUM_BALLS - BALLS_PER_COLOUR, snake_case__ )
__UpperCAmelCase : Dict = NUM_COLOURS * (1 - missing_colour / total)
return f'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
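# Monte Carlo sanity check for the closed-form expectation above (a sketch,
# not part of the original solution): by linearity of expectation,
# E[distinct colours] = NUM_COLOURS * (1 - C(60, 20) / C(70, 20)) ~= 6.818741802.
def _estimate_distinct_colours(trials: int = 100_000) -> float:
    import random
    balls = [colour for colour in range(NUM_COLOURS) for _ in range(BALLS_PER_COLOUR)]
    total = 0
    for _ in range(trials):
        total += len(set(random.sample(balls, 20)))
    return total / trials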
| 342 | 1 |
from __future__ import annotations
def _UpperCamelCase ( snake_case__, snake_case__ ) -> list[int]:
__UpperCAmelCase : Optional[Any] = 0
__UpperCAmelCase : List[Any] = len(snake_case__ ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
__UpperCAmelCase : Optional[int] = i + 1
else:
__UpperCAmelCase : Union[str, Any] = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{two_pointer([2, 7, 11, 15], 9) = }')
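# The two-pointer routine above assumes `nums` is sorted in ascending order.
# For unsorted input, a one-pass hash map is the usual O(n) alternative
# (a sketch, not part of the original snippet):
def two_sum_hashmap(nums: list[int], target: int) -> list[int]:
    seen: dict[int, int] = {}  # value -> index of its first occurrence
    for index, value in enumerate(nums):
        if target - value in seen:
            return [seen[target - value], index]
        seen[value] = index
    return []
# two_sum_hashmap([11, 2, 15, 7], 9) returns [1, 3]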
| 342 |
def _UpperCamelCase ( snake_case__ ) -> int:
__UpperCAmelCase : int = [0] * len(snake_case__ )
__UpperCAmelCase : Union[str, Any] = []
__UpperCAmelCase : str = [1] * len(snake_case__ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(snake_case__ ) ):
if indegree[i] == 0:
queue.append(snake_case__ )
while queue:
__UpperCAmelCase : List[str] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
__UpperCAmelCase : str = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(snake_case__ )
print(max(snake_case__ ) )
# Adjacency list of Graph
_snake_case = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
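# The Kahn-style traversal above is only meaningful on a DAG; the same
# indegree bookkeeping doubles as a cycle check (a sketch, not part of the
# original snippet): if fewer than len(graph) vertices are ever dequeued,
# the graph contains a cycle.
def has_cycle(graph: dict) -> bool:
    indegree = {vertex: 0 for vertex in graph}
    for targets in graph.values():
        for target in targets:
            indegree[target] += 1
    queue = [vertex for vertex, degree in indegree.items() if degree == 0]
    dequeued = 0
    while queue:
        vertex = queue.pop(0)
        dequeued += 1
        for target in graph[vertex]:
            indegree[target] -= 1
            if indegree[target] == 0:
                queue.append(target)
    return dequeued != len(graph)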
| 342 | 1 |
import copy
import re
class _snake_case :
lowerCamelCase__: List[str] = "hp"
lowerCamelCase__: List[str] = {}
lowerCamelCase__: Tuple = None
@classmethod
def _lowerCamelCase ( cls: Any , __lowerCamelCase: Any , __lowerCamelCase: Any ) -> Dict:
__UpperCAmelCase : List[str] = prefix
__UpperCAmelCase : int = defaults
cls.build_naming_info()
@staticmethod
def _lowerCamelCase ( __lowerCamelCase: Dict , __lowerCamelCase: Tuple ) -> Tuple:
if len(__lowerCamelCase ) == 0:
return ""
__UpperCAmelCase : Union[str, Any] = None
if any(char.isdigit() for char in word ):
raise Exception(f'''Parameters should not contain numbers: \'{word}\' contains a number''' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(__lowerCamelCase ) + 1 ):
__UpperCAmelCase : List[str] = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
__UpperCAmelCase : List[str] = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(__lowerCamelCase: Tuple ):
__UpperCAmelCase : Optional[int] = ""
while integer != 0:
__UpperCAmelCase : Union[str, Any] = chr(ord("A" ) + integer % 10 ) + s
integer //= 10
return s
__UpperCAmelCase : Dict = 0
while True:
__UpperCAmelCase : int = word + "#" + int_to_alphabetic(__lowerCamelCase )
if sword in info["reverse_short_word"]:
continue
else:
__UpperCAmelCase : List[str] = sword
break
__UpperCAmelCase : str = short_word
__UpperCAmelCase : int = word
return short_word
@staticmethod
def _lowerCamelCase ( __lowerCamelCase: Any , __lowerCamelCase: List[str] ) -> Union[str, Any]:
__UpperCAmelCase : Optional[Any] = param_name.split("_" )
__UpperCAmelCase : str = [TrialShortNamer.shortname_for_word(__lowerCamelCase , __lowerCamelCase ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
__UpperCAmelCase : List[str] = ["", "_"]
for separator in separators:
__UpperCAmelCase : Any = separator.join(__lowerCamelCase )
if shortname not in info["reverse_short_param"]:
__UpperCAmelCase : Optional[Any] = shortname
__UpperCAmelCase : int = param_name
return shortname
return param_name
@staticmethod
def _lowerCamelCase ( __lowerCamelCase: int , __lowerCamelCase: Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase : List[Any] = TrialShortNamer.shortname_for_key(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Dict = short_name
__UpperCAmelCase : Tuple = param_name
@classmethod
def _lowerCamelCase ( cls: Dict ) -> Any:
if cls.NAMING_INFO is not None:
return
__UpperCAmelCase : Union[str, Any] = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
__UpperCAmelCase : str = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : List[str] = info
@classmethod
def _lowerCamelCase ( cls: Union[str, Any] , __lowerCamelCase: Optional[int] ) -> str:
cls.build_naming_info()
assert cls.PREFIX is not None
__UpperCAmelCase : Any = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f'''You should provide a default value for the param name {k} with value {v}''' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
__UpperCAmelCase : Any = cls.NAMING_INFO["short_param"][k]
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : int = 1 if v else 0
__UpperCAmelCase : str = "" if isinstance(__lowerCamelCase , (int, float) ) else "-"
__UpperCAmelCase : Tuple = f'''{key}{sep}{v}'''
name.append(__lowerCamelCase )
return "_".join(__lowerCamelCase )
@classmethod
def _lowerCamelCase ( cls: Optional[int] , __lowerCamelCase: Dict ) -> Any:
__UpperCAmelCase : Any = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
__UpperCAmelCase : str = []
else:
__UpperCAmelCase : Union[str, Any] = repr.split("_" )
__UpperCAmelCase : str = {}
for value in values:
if "-" in value:
__UpperCAmelCase , __UpperCAmelCase : Tuple = value.split("-" )
else:
__UpperCAmelCase : List[str] = re.sub("[0-9.]" , "" , __lowerCamelCase )
__UpperCAmelCase : List[Any] = float(re.sub("[^0-9.]" , "" , __lowerCamelCase ) )
__UpperCAmelCase : int = cls.NAMING_INFO["reverse_short_param"][p_k]
__UpperCAmelCase : int = p_v
for k in cls.DEFAULTS:
if k not in parameters:
__UpperCAmelCase : Dict = cls.DEFAULTS[k]
return parameters
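# Usage sketch for the namer above (the subclass, prefix and defaults are
# hypothetical; note that shortname_for_word raises on words containing
# digits, so parameter names must be digit-free):
class RunNamer(TrialShortNamer):
    PREFIX = "run"
    DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32, "mixed_precision": False}
# RunNamer.shortname({"learning_rate": 1e-4, "batch_size": 32, "mixed_precision": True})
# encodes only the non-default values, yielding something like "run_lr0.0001_mp1",
# and RunNamer.parse_repr() on the part after the prefix inverts the mapping.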
| 342 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
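# The module above uses transformers' lazy-import layout: heavy submodules
# are only imported on first attribute access. A minimal sketch of the idea
# (simplified; the real _LazyModule also wires up __dir__, pickling, etc.):
import importlib
import types
class _MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value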
| 342 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: Tuple ) -> Dict:
__UpperCAmelCase : Optional[Any] = tempfile.mkdtemp()
# fmt: off
__UpperCAmelCase : Any = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
__UpperCAmelCase : Union[str, Any] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
__UpperCAmelCase : Optional[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
__UpperCAmelCase : List[str] = {"unk_token": "<unk>"}
__UpperCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__lowerCamelCase ) )
__UpperCAmelCase : int = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
"image_std": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
        __UpperCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( self: List[str] , **__lowerCamelCase: str ) -> Optional[Any]:
return CLIPTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def _lowerCamelCase ( self: Dict , **__lowerCamelCase: Any ) -> List[Any]:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def _lowerCamelCase ( self: List[str] , **__lowerCamelCase: List[Any] ) -> str:
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def _lowerCamelCase ( self: Dict ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def _lowerCamelCase ( self: Tuple ) -> List[str]:
__UpperCAmelCase : Tuple = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        __UpperCAmelCase : List[Any] = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCamelCase ( self: str ) -> str:
__UpperCAmelCase : str = self.get_tokenizer()
__UpperCAmelCase : str = self.get_rust_tokenizer()
__UpperCAmelCase : Any = self.get_image_processor()
__UpperCAmelCase : Any = CLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
__UpperCAmelCase : str = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase )
__UpperCAmelCase : Any = CLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
__UpperCAmelCase : Tuple = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCamelCase )
def _lowerCamelCase ( self: Dict ) -> List[str]:
__UpperCAmelCase : Tuple = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__UpperCAmelCase : str = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__UpperCAmelCase : Any = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
__UpperCAmelCase : List[Any] = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def _lowerCamelCase ( self: Optional[int] ) -> str:
__UpperCAmelCase : Tuple = self.get_image_processor()
__UpperCAmelCase : List[Any] = self.get_tokenizer()
__UpperCAmelCase : Union[str, Any] = CLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
__UpperCAmelCase : List[str] = self.prepare_image_inputs()
__UpperCAmelCase : Optional[int] = image_processor(__lowerCamelCase , return_tensors="np" )
__UpperCAmelCase : Union[str, Any] = processor(images=__lowerCamelCase , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _lowerCamelCase ( self: List[str] ) -> List[Any]:
__UpperCAmelCase : Dict = self.get_image_processor()
__UpperCAmelCase : Tuple = self.get_tokenizer()
__UpperCAmelCase : List[str] = CLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
__UpperCAmelCase : str = "lower newer"
__UpperCAmelCase : List[str] = processor(text=__lowerCamelCase )
__UpperCAmelCase : Dict = tokenizer(__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowerCamelCase ( self: Any ) -> List[str]:
__UpperCAmelCase : Any = self.get_image_processor()
__UpperCAmelCase : str = self.get_tokenizer()
__UpperCAmelCase : Optional[int] = CLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
__UpperCAmelCase : Tuple = "lower newer"
__UpperCAmelCase : str = self.prepare_image_inputs()
__UpperCAmelCase : Optional[int] = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def _lowerCamelCase ( self: Any ) -> Optional[Any]:
__UpperCAmelCase : Dict = self.get_image_processor()
__UpperCAmelCase : List[Any] = self.get_tokenizer()
__UpperCAmelCase : Dict = CLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
__UpperCAmelCase : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCAmelCase : Any = processor.batch_decode(__lowerCamelCase )
__UpperCAmelCase : List[str] = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( self: List[Any] ) -> Optional[Any]:
__UpperCAmelCase : Any = self.get_image_processor()
__UpperCAmelCase : Dict = self.get_tokenizer()
__UpperCAmelCase : str = CLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
__UpperCAmelCase : Dict = "lower newer"
__UpperCAmelCase : List[Any] = self.prepare_image_inputs()
__UpperCAmelCase : str = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
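# End-to-end sketch of the processor feeding a model, mirroring the fixtures
# above (assumes the public "openai/clip-vit-base-patch32" checkpoint and
# network access; not part of the test suite):
def _clip_similarity_demo():
    import torch
    from PIL import Image
    from transformers import CLIPModel, CLIPProcessor
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
    image = Image.new("RGB", (224, 224))  # stand-in image
    inputs = processor(text=["a cat", "a dog"], images=image, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits_per_image = model(**inputs).logits_per_image
    return logits_per_image.softmax(dim=-1)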
| 342 |
from __future__ import annotations
from math import pi
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
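# Worked example for the solver above: a 10 mH inductor at 50 Hz has
# X_L = 2 * pi * f * L = 2 * pi * 50 * 0.01 ~= 3.1416 ohm. Passing 0 for
# exactly one positional argument (inductance, frequency, reactance)
# recovers it from the other two:
def _reactance_demo() -> None:
    print(_UpperCamelCase(0.01, 50, 0))  # {'reactance': 3.141592653589793}
    print(_UpperCamelCase(0, 50, 3.141592653589793))  # {'inductance': 0.01}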
| 342 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ = None, ) -> Tuple:
__UpperCAmelCase : Optional[int] = {}
if train_file is not None:
__UpperCAmelCase : str = [train_file]
if eval_file is not None:
__UpperCAmelCase : List[Any] = [eval_file]
if test_file is not None:
__UpperCAmelCase : Optional[Any] = [test_file]
__UpperCAmelCase : Optional[int] = datasets.load_dataset("csv", data_files=snake_case__ )
__UpperCAmelCase : Tuple = list(ds[list(files.keys() )[0]].features.keys() )
__UpperCAmelCase : str = features_name.pop(snake_case__ )
__UpperCAmelCase : List[Any] = list(set(ds[list(files.keys() )[0]][label_name] ) )
__UpperCAmelCase : Union[str, Any] = {label: i for i, label in enumerate(snake_case__ )}
__UpperCAmelCase : List[str] = tokenizer.model_input_names
__UpperCAmelCase : Any = {}
if len(snake_case__ ) == 1:
for k in files.keys():
__UpperCAmelCase : Optional[int] = ds[k].map(
lambda snake_case__ : tokenizer.batch_encode_plus(
example[features_name[0]], truncation=snake_case__, max_length=snake_case__, padding="max_length" ), batched=snake_case__, )
elif len(snake_case__ ) == 2:
for k in files.keys():
__UpperCAmelCase : str = ds[k].map(
lambda snake_case__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]), truncation=snake_case__, max_length=snake_case__, padding="max_length", ), batched=snake_case__, )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
__UpperCAmelCase : List[str] = {k: v for k, v in ex.items() if k in input_names}
__UpperCAmelCase : Union[str, Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
__UpperCAmelCase : int = {k: v for k, v in ex.items() if k in input_names}
__UpperCAmelCase : str = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
__UpperCAmelCase : Any = {k: v for k, v in ex.items() if k in input_names}
__UpperCAmelCase : Union[str, Any] = labelaid[ex[label_name]]
yield (d, label)
__UpperCAmelCase : List[str] = (
tf.data.Dataset.from_generator(
snake_case__, ({k: tf.intaa for k in input_names}, tf.intaa), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
__UpperCAmelCase : Tuple = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
__UpperCAmelCase : List[str] = (
tf.data.Dataset.from_generator(
snake_case__, ({k: tf.intaa for k in input_names}, tf.intaa), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
__UpperCAmelCase : Union[str, Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
__UpperCAmelCase : List[Any] = (
tf.data.Dataset.from_generator(
snake_case__, ({k: tf.intaa for k in input_names}, tf.intaa), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
__UpperCAmelCase : int = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
_snake_case = logging.getLogger(__name__)
@dataclass
class _snake_case :
lowerCamelCase__: int = field(metadata={"help": "Which column contains the label"} )
lowerCamelCase__: str = field(default=_lowercase , metadata={"help": "The path of the training file"} )
lowerCamelCase__: Optional[str] = field(default=_lowercase , metadata={"help": "The path of the development file"} )
lowerCamelCase__: Optional[str] = field(default=_lowercase , metadata={"help": "The path of the test file"} )
lowerCamelCase__: int = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowerCamelCase__: bool = field(
default=_lowercase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class _snake_case :
lowerCamelCase__: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowerCamelCase__: Optional[str] = field(
default=_lowercase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCamelCase__: Optional[str] = field(
default=_lowercase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowerCamelCase__: bool = field(default=_lowercase , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowerCamelCase__: Optional[str] = field(
default=_lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def _UpperCamelCase ( ) -> str:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__UpperCAmelCase : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, )
logger.info(
f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
        f'''16-bits training: {training_args.fp16}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = get_tfds(
train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=snake_case__, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, )
__UpperCAmelCase : Dict = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(snake_case__ ), label2id=snake_case__, id2label={id: label for label, id in labelaid.items()}, finetuning_task="text-classification", cache_dir=model_args.cache_dir, )
with training_args.strategy.scope():
__UpperCAmelCase : List[str] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path, from_pt=bool(".bin" in model_args.model_name_or_path ), config=snake_case__, cache_dir=model_args.cache_dir, )
def compute_metrics(snake_case__ ) -> Dict:
__UpperCAmelCase : str = np.argmax(p.predictions, axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
__UpperCAmelCase : Dict = TFTrainer(
model=snake_case__, args=snake_case__, train_dataset=snake_case__, eval_dataset=snake_case__, compute_metrics=snake_case__, )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCAmelCase : List[Any] = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
__UpperCAmelCase : Optional[Any] = trainer.evaluate()
__UpperCAmelCase : List[Any] = os.path.join(training_args.output_dir, "eval_results.txt" )
with open(snake_case__, "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
results.update(snake_case__ )
return results
if __name__ == "__main__":
main()
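# Example invocation (a sketch; the script name, CSV paths and column index
# are hypothetical -- the data flags come from the dataclasses above, the
# rest from TFTrainingArguments):
#
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --train_file train.csv --dev_file dev.csv \
#       --label_column_id 0 --max_seq_length 128 \
#       --output_dir ./model_out --do_train --do_eval \
#       --per_device_train_batch_size 16 --num_train_epochs 3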
| 342 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class _snake_case ( nn.Module ):
lowerCamelCase__: int
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: Tuple ) -> Union[str, Any]:
__UpperCAmelCase : List[str] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Optional[Any] , __lowerCamelCase: Optional[int] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = hidden_states.shape
__UpperCAmelCase : Dict = jax.image.resize(
__lowerCamelCase , shape=(batch, height * 2, width * 2, channels) , method="nearest" , )
__UpperCAmelCase : Dict = self.conv(__lowerCamelCase )
return hidden_states
class _snake_case ( nn.Module ):
lowerCamelCase__: int
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: str ) -> Any:
__UpperCAmelCase : Optional[int] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Dict , __lowerCamelCase: str ) -> List[Any]:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
__UpperCAmelCase : Any = self.conv(__lowerCamelCase )
return hidden_states
class _snake_case ( nn.Module ):
lowerCamelCase__: int
lowerCamelCase__: int = None
lowerCamelCase__: float = 0.0
lowerCamelCase__: bool = None
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: str ) -> List[str]:
__UpperCAmelCase : str = self.in_channels if self.out_channels is None else self.out_channels
        __UpperCAmelCase : Dict = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )  # norma
        __UpperCAmelCase : List[str] = nn.Conv(  # conva
            __lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        __UpperCAmelCase : Optional[Any] = nn.Dense(__lowerCamelCase , dtype=self.dtype )  # time_emb_proj
        __UpperCAmelCase : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )  # normb
        __UpperCAmelCase : Optional[Any] = nn.Dropout(self.dropout_prob )  # dropout
        __UpperCAmelCase : Tuple = nn.Conv(  # convb
            __lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__UpperCAmelCase : Optional[int] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
__UpperCAmelCase : List[Any] = None
if use_nin_shortcut:
__UpperCAmelCase : Dict = nn.Conv(
__lowerCamelCase , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , )
def __call__( self: Tuple , __lowerCamelCase: Tuple , __lowerCamelCase: str , __lowerCamelCase: Union[str, Any]=True ) -> List[Any]:
__UpperCAmelCase : Dict = hidden_states
        __UpperCAmelCase : int = self.norma(__lowerCamelCase )  # first group norm
        __UpperCAmelCase : Union[str, Any] = nn.swish(__lowerCamelCase )
        __UpperCAmelCase : Tuple = self.conva(__lowerCamelCase )  # first 3x3 conv
        __UpperCAmelCase : Optional[Any] = self.time_emb_proj(nn.swish(__lowerCamelCase ) )
        __UpperCAmelCase : List[str] = jnp.expand_dims(jnp.expand_dims(__lowerCamelCase , 1 ) , 1 )
        __UpperCAmelCase : List[str] = hidden_states + temb
        __UpperCAmelCase : Union[str, Any] = self.normb(__lowerCamelCase )  # second group norm
        __UpperCAmelCase : Tuple = nn.swish(__lowerCamelCase )
        __UpperCAmelCase : str = self.dropout(__lowerCamelCase , __lowerCamelCase )
        __UpperCAmelCase : List[str] = self.convb(__lowerCamelCase )  # second 3x3 conv
if self.conv_shortcut is not None:
__UpperCAmelCase : Optional[int] = self.conv_shortcut(__lowerCamelCase )
return hidden_states + residual
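# Shape-check sketch against the public diffusers equivalents of the modules
# above (FlaxUpsample2D / FlaxDownsample2D / FlaxResnetBlock2D); assumes
# diffusers is installed, and uses the NHWC layout implied by the
# jax.image.resize call in the upsampler:
def _flax_resnet_demo():
    from diffusers.models.resnet_flax import FlaxResnetBlock2D
    block = FlaxResnetBlock2D(in_channels=32, out_channels=32)
    rng = jax.random.PRNGKey(0)
    hidden_states = jnp.zeros((1, 16, 16, 32))  # (batch, height, width, channels)
    temb = jnp.zeros((1, 128))  # time embedding
    params = block.init(rng, hidden_states, temb)
    return block.apply(params, hidden_states, temb).shape  # (1, 16, 16, 32)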
| 342 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_snake_case = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
_snake_case = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
_snake_case = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class _snake_case ( _lowercase ):
lowerCamelCase__: Any = VOCAB_FILES_NAMES
lowerCamelCase__: Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__: Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__: List[str] = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase__: Optional[int] = ["input_ids", "attention_mask"]
lowerCamelCase__: Dict = DistilBertTokenizer
def __init__( self: Dict , __lowerCamelCase: List[Any]=None , __lowerCamelCase: str=None , __lowerCamelCase: Any=True , __lowerCamelCase: List[Any]="[UNK]" , __lowerCamelCase: Any="[SEP]" , __lowerCamelCase: List[str]="[PAD]" , __lowerCamelCase: Union[str, Any]="[CLS]" , __lowerCamelCase: Optional[Any]="[MASK]" , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: str=None , **__lowerCamelCase: Tuple , ) -> List[Any]:
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , do_lower_case=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , tokenize_chinese_chars=__lowerCamelCase , strip_accents=__lowerCamelCase , **__lowerCamelCase , )
__UpperCAmelCase : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , __lowerCamelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , __lowerCamelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , __lowerCamelCase ) != tokenize_chinese_chars
):
__UpperCAmelCase : List[Any] = getattr(__lowerCamelCase , normalizer_state.pop("type" ) )
__UpperCAmelCase : List[str] = do_lower_case
__UpperCAmelCase : Any = strip_accents
__UpperCAmelCase : List[str] = tokenize_chinese_chars
__UpperCAmelCase : List[str] = normalizer_class(**__lowerCamelCase )
__UpperCAmelCase : List[str] = do_lower_case
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: List[str] , __lowerCamelCase: Dict=None ) -> str:
        # token_ids_a is the required first sequence, token_ids_b the optional second
        __UpperCAmelCase : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
def _lowerCamelCase ( self: int , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase : Optional[int] = [self.sep_token_id]
__UpperCAmelCase : str = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[str] = None ) -> Tuple[str]:
__UpperCAmelCase : Optional[Any] = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
return tuple(__lowerCamelCase )
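# Layout produced by the two methods above, checked against the public
# "distilbert-base-uncased" checkpoint (a sketch; needs network access on
# first run):
def _special_tokens_demo():
    from transformers import DistilBertTokenizerFast
    tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
    encoded = tokenizer("lower", "newer")
    return tokenizer.convert_ids_to_tokens(encoded["input_ids"])
    # e.g. ['[CLS]', 'lower', '[SEP]', 'newer', '[SEP]']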
| 342 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_snake_case = pytest.mark.integration
@require_faiss
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: Union[str, Any] ) -> str:
__UpperCAmelCase : Optional[int] = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(__lowerCamelCase ) for x in np.arange(30 ).tolist()]} )
return dset
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
__UpperCAmelCase : int = dset.map(
lambda __lowerCamelCase , __lowerCamelCase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__lowerCamelCase , keep_in_memory=__lowerCamelCase )
__UpperCAmelCase : Tuple = dset.add_faiss_index("vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT )
__UpperCAmelCase , __UpperCAmelCase : Dict = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def _lowerCamelCase ( self: List[str] ) -> int:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__UpperCAmelCase , __UpperCAmelCase : Tuple = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
__UpperCAmelCase , __UpperCAmelCase : List[Any] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(__lowerCamelCase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def _lowerCamelCase ( self: List[str] ) -> Dict:
from elasticsearch import Elasticsearch
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
__UpperCAmelCase : int = {"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 30 )
__UpperCAmelCase : Dict = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
__UpperCAmelCase : Any = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=__lowerCamelCase )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: List[str] ) -> Optional[int]:
import faiss
__UpperCAmelCase : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
__UpperCAmelCase : Dict = np.zeros(5 , dtype=np.floataa )
__UpperCAmelCase : List[str] = 1
__UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(__lowerCamelCase )
self.assertRaises(__lowerCamelCase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
__UpperCAmelCase : List[str] = np.eye(5 , dtype=np.floataa )[::-1]
__UpperCAmelCase , __UpperCAmelCase : Any = index.search_batch(__lowerCamelCase )
self.assertRaises(__lowerCamelCase , index.search_batch , queries[0] )
__UpperCAmelCase : Dict = [scores[0] for scores in total_scores]
__UpperCAmelCase : int = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , __lowerCamelCase )
def _lowerCamelCase ( self: Any ) -> List[str]:
import faiss
__UpperCAmelCase : Dict = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
__UpperCAmelCase : Optional[Any] = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__lowerCamelCase ):
__UpperCAmelCase : Any = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def _lowerCamelCase ( self: List[str] ) -> Dict:
import faiss
__UpperCAmelCase : str = faiss.IndexFlat(5 )
__UpperCAmelCase : int = FaissIndex(custom_index=__lowerCamelCase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def _lowerCamelCase ( self: Union[str, Any] ) -> int:
import faiss
__UpperCAmelCase : Any = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file:
index.save(tmp_file.name )
__UpperCAmelCase : List[str] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
__UpperCAmelCase : Tuple = np.zeros(5 , dtype=np.floataa )
__UpperCAmelCase : Tuple = 1
__UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search(__lowerCamelCase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def _UpperCamelCase ( snake_case__ ) -> Optional[Any]:
import faiss
__UpperCAmelCase : Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5, dtype=np.floataa ) )
__UpperCAmelCase : Optional[Any] = "index.faiss"
__UpperCAmelCase : Optional[int] = f'''mock://{index_name}'''
index.save(snake_case__, storage_options=mockfs.storage_options )
__UpperCAmelCase : Dict = FaissIndex.load(snake_case__, storage_options=mockfs.storage_options )
__UpperCAmelCase : str = np.zeros(5, dtype=np.floataa )
__UpperCAmelCase : Any = 1
__UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(snake_case__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
__UpperCAmelCase : Optional[Any] = Elasticsearch()
__UpperCAmelCase : Dict = {"acknowledged": True}
__UpperCAmelCase : Any = ElasticSearchIndex(es_client=__lowerCamelCase )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(["foo", "bar", "foobar"] )
# single query
__UpperCAmelCase : Dict = "foo"
__UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = index.search(__lowerCamelCase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
__UpperCAmelCase : int = "foo"
__UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search(__lowerCamelCase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
__UpperCAmelCase : int = ["foo", "bar", "foobar"]
__UpperCAmelCase : Union[str, Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
__UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search_batch(__lowerCamelCase )
__UpperCAmelCase : Tuple = [scores[0] for scores in total_scores]
__UpperCAmelCase : Optional[int] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __lowerCamelCase )
# batched queries with timeout
__UpperCAmelCase : str = ["foo", "bar", "foobar"]
__UpperCAmelCase : Tuple = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search_batch(__lowerCamelCase , request_timeout=30 )
__UpperCAmelCase : Union[str, Any] = [scores[0] for scores in total_scores]
__UpperCAmelCase : List[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __lowerCamelCase )
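# Standalone FaissIndex round trip mirroring the tests above (a sketch;
# assumes faiss is installed and uses datasets' public search API):
def _faiss_demo():
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))
    scores, indices = index.search(np.ones(5, dtype=np.float32))
    return scores, indices  # the all-ones query scores every basis vector equally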
| 342 | 1 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
_snake_case = logging.get_logger(__name__)
_snake_case = {}
_snake_case = {}
_snake_case = {}
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ = None, ) -> Tuple:
__UpperCAmelCase : Any = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' )
__UpperCAmelCase : str = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' )
__UpperCAmelCase : int = format_type
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ = None ) -> Optional[int]:
__UpperCAmelCase : Union[str, Any] = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
__UpperCAmelCase : Any = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
_snake_case = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
_snake_case = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
_snake_case = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def _UpperCamelCase ( snake_case__ ) -> Optional[str]:
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def _UpperCamelCase ( snake_case__, **snake_case__ ) -> Formatter:
__UpperCAmelCase : str = get_format_type_from_alias(snake_case__ )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**snake_case__ )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'''' )
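# Usage sketch for the registry above: get_formatter() resolves aliases
# first, so "np" and "numpy" yield the same formatter class, and an unknown
# type raises a ValueError listing the registered ones:
def _formatter_demo() -> Formatter:
    numpy_formatter = get_formatter("np")
    assert type(numpy_formatter) is type(get_formatter("numpy"))
    return numpy_formatter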
| 342 |
import argparse
import struct
import unittest
class _snake_case :
def __init__( self: Tuple , __lowerCamelCase: bytes ) -> None:
__UpperCAmelCase : Tuple = data
# Initialize hash values
__UpperCAmelCase : Any = [
0x6_A_0_9_E_6_6_7,
0xB_B_6_7_A_E_8_5,
0x3_C_6_E_F_3_7_2,
0xA_5_4_F_F_5_3_A,
0x5_1_0_E_5_2_7_F,
0x9_B_0_5_6_8_8_C,
0x1_F_8_3_D_9_A_B,
0x5_B_E_0_C_D_1_9,
]
# Initialize round constants
__UpperCAmelCase : Dict = [
0x4_2_8_A_2_F_9_8,
0x7_1_3_7_4_4_9_1,
0xB_5_C_0_F_B_C_F,
0xE_9_B_5_D_B_A_5,
0x3_9_5_6_C_2_5_B,
0x5_9_F_1_1_1_F_1,
0x9_2_3_F_8_2_A_4,
0xA_B_1_C_5_E_D_5,
0xD_8_0_7_A_A_9_8,
0x1_2_8_3_5_B_0_1,
0x2_4_3_1_8_5_B_E,
0x5_5_0_C_7_D_C_3,
0x7_2_B_E_5_D_7_4,
0x8_0_D_E_B_1_F_E,
0x9_B_D_C_0_6_A_7,
0xC_1_9_B_F_1_7_4,
0xE_4_9_B_6_9_C_1,
0xE_F_B_E_4_7_8_6,
0x0_F_C_1_9_D_C_6,
0x2_4_0_C_A_1_C_C,
0x2_D_E_9_2_C_6_F,
0x4_A_7_4_8_4_A_A,
0x5_C_B_0_A_9_D_C,
0x7_6_F_9_8_8_D_A,
0x9_8_3_E_5_1_5_2,
0xA_8_3_1_C_6_6_D,
0xB_0_0_3_2_7_C_8,
0xB_F_5_9_7_F_C_7,
0xC_6_E_0_0_B_F_3,
0xD_5_A_7_9_1_4_7,
0x0_6_C_A_6_3_5_1,
0x1_4_2_9_2_9_6_7,
0x2_7_B_7_0_A_8_5,
0x2_E_1_B_2_1_3_8,
0x4_D_2_C_6_D_F_C,
0x5_3_3_8_0_D_1_3,
0x6_5_0_A_7_3_5_4,
0x7_6_6_A_0_A_B_B,
0x8_1_C_2_C_9_2_E,
0x9_2_7_2_2_C_8_5,
0xA_2_B_F_E_8_A_1,
0xA_8_1_A_6_6_4_B,
0xC_2_4_B_8_B_7_0,
0xC_7_6_C_5_1_A_3,
0xD_1_9_2_E_8_1_9,
0xD_6_9_9_0_6_2_4,
0xF_4_0_E_3_5_8_5,
0x1_0_6_A_A_0_7_0,
0x1_9_A_4_C_1_1_6,
0x1_E_3_7_6_C_0_8,
0x2_7_4_8_7_7_4_C,
0x3_4_B_0_B_C_B_5,
0x3_9_1_C_0_C_B_3,
0x4_E_D_8_A_A_4_A,
0x5_B_9_C_C_A_4_F,
0x6_8_2_E_6_F_F_3,
0x7_4_8_F_8_2_E_E,
0x7_8_A_5_6_3_6_F,
0x8_4_C_8_7_8_1_4,
0x8_C_C_7_0_2_0_8,
0x9_0_B_E_F_F_F_A,
0xA_4_5_0_6_C_E_B,
0xB_E_F_9_A_3_F_7,
0xC_6_7_1_7_8_F_2,
]
__UpperCAmelCase : List[Any] = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _lowerCamelCase ( __lowerCamelCase: bytes ) -> bytes:
__UpperCAmelCase : List[str] = B"\x80" + (B"\x00" * (63 - (len(__lowerCamelCase ) + 8) % 64))
__UpperCAmelCase : int = struct.pack(">Q" , (len(__lowerCamelCase ) * 8) )
return data + padding + big_endian_integer
def _lowerCamelCase ( self: Dict ) -> None:
# Convert into blocks of 64 bytes
__UpperCAmelCase : Dict = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
__UpperCAmelCase : List[str] = list(struct.unpack(">16L" , __lowerCamelCase ) )
            # append 48 zeroed integers to extend the message schedule to 64 words
words += [0] * 48
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = self.hashes
for index in range(0 , 64 ):
if index > 15:
                    # modify the zeroed indexes at the end of the array
__UpperCAmelCase : Union[str, Any] = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
__UpperCAmelCase : str = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
__UpperCAmelCase : Union[str, Any] = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_0_0_0_0_0_0_0_0
# Compression
__UpperCAmelCase : Union[str, Any] = self.ror(__lowerCamelCase , 6 ) ^ self.ror(__lowerCamelCase , 11 ) ^ self.ror(__lowerCamelCase , 25 )
__UpperCAmelCase : Tuple = (e & f) ^ ((~e & 0xF_F_F_F_F_F_F_F) & g)
__UpperCAmelCase : int = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_0_0_0_0_0_0_0_0
__UpperCAmelCase : List[Any] = self.ror(__lowerCamelCase , 2 ) ^ self.ror(__lowerCamelCase , 13 ) ^ self.ror(__lowerCamelCase , 22 )
__UpperCAmelCase : Dict = (a & b) ^ (a & c) ^ (b & c)
__UpperCAmelCase : int = (sa + maj) % 0x1_0_0_0_0_0_0_0_0
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = (
g,
f,
e,
((d + tempa) % 0x1_0_0_0_0_0_0_0_0),
c,
b,
a,
((tempa + tempa) % 0x1_0_0_0_0_0_0_0_0),
)
__UpperCAmelCase : Optional[int] = [a, b, c, d, e, f, g, h]
# Modify final values
__UpperCAmelCase : List[str] = [
((element + mutated_hash_values[index]) % 0x1_0_0_0_0_0_0_0_0)
for index, element in enumerate(self.hashes )
]
__UpperCAmelCase : int = "".join([hex(__lowerCamelCase )[2:].zfill(8 ) for value in self.hashes] )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: int ) -> int:
return 0xF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations)
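# Standalone sketch of the 32-bit right-rotation used above (illustrative name):
def ror32(value: int, rotations: int) -> int:
    # The low bits wrap around to the top while the result stays within 32 bits.
    return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
assert ror32(0x00000001, 1) == 0x80000000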
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: List[Any] ) -> None:
import hashlib
__UpperCAmelCase : Dict = bytes("Test String" , "utf-8" )
self.assertEqual(SHAaaa(__lowerCamelCase ).hash , hashlib.shaaaa(__lowerCamelCase ).hexdigest() )
def _UpperCamelCase ( ) -> None:
import doctest
doctest.testmod()
__UpperCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
parser.add_argument(
"-f", "--file", dest="input_file", help="Hash contents of a file" )
__UpperCAmelCase : List[Any] = parser.parse_args()
__UpperCAmelCase : Optional[int] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file, "rb" ) as f:
__UpperCAmelCase : List[str] = f.read()
else:
__UpperCAmelCase : List[Any] = bytes(snake_case__, "utf-8" )
print(SHAaaa(snake_case__ ).hash )
if __name__ == "__main__":
main()
| 342 | 1 |
def _UpperCamelCase ( snake_case__ ) -> list:
def merge(snake_case__, snake_case__ ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(snake_case__ ) <= 1:
return collection
__UpperCAmelCase : Union[str, Any] = len(snake_case__ ) // 2
return merge(merge_sort(collection[:mid] ), merge_sort(collection[mid:] ) )
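# Example (using the call-site name `merge_sort`, which the renaming left intact):
#     merge_sort([5, 3, 1, 4, 2]) -> [1, 2, 3, 4, 5]
#     merge_sort([]) -> []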
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = input('''Enter numbers separated by a comma:\n''').strip()
_snake_case = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 342 | import numpy as np
import datasets
_snake_case = '''
Compute the Mahalanobis Distance
The Mahalanobis distance is the distance between a point and a distribution,
not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
_snake_case = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
_snake_case = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalanobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def _lowerCamelCase ( self: List[str] ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: Union[str, Any] ) -> List[str]:
# convert to numpy arrays
__UpperCAmelCase : int = np.array(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = np.array(__lowerCamelCase )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("Expected `X` to be a 2D vector" )
if len(reference_distribution.shape ) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
# Get mahalanobis distance for each prediction
__UpperCAmelCase : str = X - np.mean(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = np.cov(reference_distribution.T )
try:
__UpperCAmelCase : int = np.linalg.inv(__lowerCamelCase )
except np.linalg.LinAlgError:
__UpperCAmelCase : Optional[int] = np.linalg.pinv(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = np.dot(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Optional[int] = np.dot(__lowerCamelCase , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 342 | 1 |
def _UpperCamelCase ( snake_case__ ) -> bool:
if not all(x.isalpha() for x in string ):
raise ValueError("String must only contain alphabetic characters." )
__UpperCAmelCase : Optional[Any] = sorted(string.lower() )
return len(snake_case__ ) == len(set(snake_case__ ) )
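# Example (call-site name `is_isogram`):
#     is_isogram("Uncopyrightable") -> True
#     is_isogram("allowance") -> False  (repeated "a" and "l")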
if __name__ == "__main__":
_snake_case = input('''Enter a string ''').strip()
_snake_case = is_isogram(input_str)
print(F'{input_str} is {"an" if isogram else "not an"} isogram.')
| 342 | import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class _snake_case ( unittest.TestCase ):
def __init__( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict=13 , __lowerCamelCase: List[str]=7 , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: int=True , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Tuple=99 , __lowerCamelCase: List[str]=32 , __lowerCamelCase: Optional[Any]=5 , __lowerCamelCase: List[str]=4 , __lowerCamelCase: str=37 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: int=0.1 , __lowerCamelCase: Optional[Any]=0.1 , __lowerCamelCase: Tuple=5_12 , __lowerCamelCase: int=16 , __lowerCamelCase: str=2 , __lowerCamelCase: Optional[Any]=0.02 , __lowerCamelCase: Optional[Any]=4 , ) -> str:
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : Optional[int] = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[str] = use_attention_mask
__UpperCAmelCase : Dict = use_token_type_ids
__UpperCAmelCase : Optional[int] = use_labels
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : Dict = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : Tuple = intermediate_size
__UpperCAmelCase : Union[str, Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : str = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = max_position_embeddings
__UpperCAmelCase : Optional[int] = type_vocab_size
__UpperCAmelCase : str = type_sequence_label_size
__UpperCAmelCase : Tuple = initializer_range
__UpperCAmelCase : str = num_choices
def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
__UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : str = None
if self.use_attention_mask:
__UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : Any = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__lowerCamelCase , )
return config, input_ids, attention_mask
def _lowerCamelCase ( self: str ) -> Any:
__UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = config_and_inputs
__UpperCAmelCase : Any = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: str = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCamelCase ( self: List[Any] ) -> Dict:
__UpperCAmelCase : Union[str, Any] = FlaxDistilBertModelTester(self )
@slow
def _lowerCamelCase ( self: Tuple ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
__UpperCAmelCase : Optional[int] = model_class_name.from_pretrained("distilbert-base-uncased" )
__UpperCAmelCase : Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(__lowerCamelCase )
@require_flax
class _snake_case ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self: int ) -> List[Any]:
__UpperCAmelCase : Dict = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased" )
__UpperCAmelCase : Any = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__UpperCAmelCase : Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__UpperCAmelCase : int = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
__UpperCAmelCase : str = (1, 11, 7_68)
self.assertEqual(output.shape , __lowerCamelCase )
__UpperCAmelCase : Optional[int] = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1e-4 ) )
| 342 | 1 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
_snake_case = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ ) -> Union[str, Any]:
for attribute in key.split("." ):
__UpperCAmelCase : Any = getattr(snake_case__, snake_case__ )
if weight_type is not None:
__UpperCAmelCase : Optional[int] = getattr(snake_case__, snake_case__ ).shape
else:
__UpperCAmelCase : Union[str, Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
__UpperCAmelCase : List[str] = value
elif weight_type == "weight_g":
__UpperCAmelCase : str = value
elif weight_type == "weight_v":
__UpperCAmelCase : Optional[Any] = value
elif weight_type == "bias":
__UpperCAmelCase : Union[str, Any] = value
else:
__UpperCAmelCase : Any = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Any:
__UpperCAmelCase : Any = []
__UpperCAmelCase : List[str] = fairseq_model.state_dict()
__UpperCAmelCase : Union[str, Any] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
__UpperCAmelCase : Optional[Any] = False
if "conv_layers" in name:
load_conv_layer(
snake_case__, snake_case__, snake_case__, snake_case__, hf_model.config.feat_extract_norm == "group", )
__UpperCAmelCase : Tuple = True
else:
for key, mapped_key in MAPPING.items():
__UpperCAmelCase : Optional[int] = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
__UpperCAmelCase : int = True
if "*" in mapped_key:
__UpperCAmelCase : str = name.split(snake_case__ )[0].split("." )[-2]
__UpperCAmelCase : Union[str, Any] = mapped_key.replace("*", snake_case__ )
if "weight_g" in name:
__UpperCAmelCase : int = "weight_g"
elif "weight_v" in name:
__UpperCAmelCase : Any = "weight_v"
elif "bias" in name:
__UpperCAmelCase : Union[str, Any] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__UpperCAmelCase : List[Any] = "weight"
else:
__UpperCAmelCase : str = None
set_recursively(snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ )
continue
if not is_used:
unused_weights.append(snake_case__ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ ) -> Dict:
__UpperCAmelCase : Union[str, Any] = full_name.split("conv_layers." )[-1]
__UpperCAmelCase : List[str] = name.split("." )
__UpperCAmelCase : List[str] = int(items[0] )
__UpperCAmelCase : int = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
__UpperCAmelCase : Union[str, Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
__UpperCAmelCase : Union[str, Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
__UpperCAmelCase : List[Any] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
__UpperCAmelCase : Tuple = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(snake_case__ )
@torch.no_grad()
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=None, snake_case__=None, snake_case__=True ) -> List[Any]:
if config_path is not None:
__UpperCAmelCase : Any = UniSpeechSatConfig.from_pretrained(snake_case__ )
else:
__UpperCAmelCase : List[Any] = UniSpeechSatConfig()
__UpperCAmelCase : Optional[Any] = ""
if is_finetuned:
__UpperCAmelCase : int = UniSpeechSatForCTC(snake_case__ )
else:
__UpperCAmelCase : Optional[Any] = UniSpeechSatForPreTraining(snake_case__ )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
__UpperCAmelCase : Dict = model[0].eval()
recursively_load_weights(snake_case__, snake_case__ )
hf_wavavec.save_pretrained(snake_case__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
_snake_case = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
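    # Example invocation (the script filename and all paths below are placeholders):
    #   python convert_unispeech_sat_checkpoint.py \
    #       --checkpoint_path ./unispeech_sat.pt \
    #       --dict_path ./dict.ltr.txt \
    #       --pytorch_dump_folder_path ./unispeech_sat_hf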
| 342 | import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
_snake_case = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
_snake_case = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
_snake_case = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
_snake_case = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
_snake_case = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Any:
for tf_name, hf_name in patterns:
__UpperCAmelCase : Optional[int] = k.replace(snake_case__, snake_case__ )
return k
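# Minimal sketch of the pattern-based renaming above, with toy patterns rather
# than the real BigBird-Pegasus mapping (`rename_key_sketch` is illustrative):
def rename_key_sketch(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
assert rename_key_sketch(
    "pegasus/layer_0/kernel", [("/", "."), ("layer_", "layers."), ("kernel", "weight")]
) == "pegasus.layers.0.weight"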
def _UpperCamelCase ( snake_case__, snake_case__ ) -> BigBirdPegasusForConditionalGeneration:
__UpperCAmelCase : Dict = BigBirdPegasusConfig(**snake_case__ )
__UpperCAmelCase : Dict = BigBirdPegasusForConditionalGeneration(snake_case__ )
__UpperCAmelCase : Optional[Any] = torch_model.state_dict()
__UpperCAmelCase : Optional[int] = {}
# separating decoder weights
__UpperCAmelCase : List[Any] = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder" )}
__UpperCAmelCase : str = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder" )}
for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion" ):
__UpperCAmelCase : Optional[int] = [k.endswith(snake_case__ ) for ending in KEYS_TO_IGNORE]
if any(snake_case__ ):
continue
__UpperCAmelCase : List[str] = DECODER_PATTERNS
__UpperCAmelCase : str = rename_state_dict_key(snake_case__, snake_case__ )
if new_k not in state_dict:
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if any(i in k for i in ["dense", "query", "key", "value"] ):
__UpperCAmelCase : Optional[int] = v.T
__UpperCAmelCase : str = torch.from_numpy(snake_case__ )
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion" ):
__UpperCAmelCase : int = [k.endswith(snake_case__ ) for ending in KEYS_TO_IGNORE]
if any(snake_case__ ):
continue
__UpperCAmelCase : Optional[Any] = REMAINING_PATTERNS
__UpperCAmelCase : Optional[int] = rename_state_dict_key(snake_case__, snake_case__ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if any(i in k for i in ["dense", "query", "key", "value"] ):
__UpperCAmelCase : List[Any] = v.T
__UpperCAmelCase : List[str] = torch.from_numpy(snake_case__ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
__UpperCAmelCase : List[Any] = mapping["model.embed_positions.weight"]
__UpperCAmelCase : Optional[Any] = mapping.pop("model.embed_positions.weight" )
__UpperCAmelCase , __UpperCAmelCase : Any = torch_model.load_state_dict(snake_case__, strict=snake_case__ )
__UpperCAmelCase : str = [
k
for k in missing
if k
not in [
"final_logits_bias",
"model.encoder.embed_tokens.weight",
"model.decoder.embed_tokens.weight",
"lm_head.weight",
]
]
assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], f'''no matches found for the following tf keys {extra}'''
return torch_model
def _UpperCamelCase ( snake_case__ ) -> Dict:
__UpperCAmelCase : Tuple = tf.train.list_variables(snake_case__ )
__UpperCAmelCase : List[str] = {}
__UpperCAmelCase : str = ["global_step"]
for name, shape in tqdm(snake_case__, desc="converting tf checkpoint to dict" ):
__UpperCAmelCase : Tuple = any(pat in name for pat in ignore_name )
if skip_key:
continue
__UpperCAmelCase : Optional[Any] = tf.train.load_variable(snake_case__, snake_case__ )
__UpperCAmelCase : Tuple = array
return tf_weights
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Dict:
__UpperCAmelCase : str = get_tf_weights_as_numpy(snake_case__ )
__UpperCAmelCase : List[Any] = convert_bigbird_pegasus(snake_case__, snake_case__ )
torch_model.save_pretrained(snake_case__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
_snake_case = parser.parse_args()
_snake_case = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 342 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _snake_case ( _lowercase ):
lowerCamelCase__: Optional[int] = "decision_transformer"
lowerCamelCase__: str = ["past_key_values"]
lowerCamelCase__: int = {
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self: Dict , __lowerCamelCase: Optional[Any]=17 , __lowerCamelCase: List[str]=4 , __lowerCamelCase: Optional[int]=1_28 , __lowerCamelCase: Tuple=40_96 , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: Optional[Any]=1 , __lowerCamelCase: Any=10_24 , __lowerCamelCase: Optional[Any]=3 , __lowerCamelCase: List[str]=1 , __lowerCamelCase: int=None , __lowerCamelCase: Any="relu" , __lowerCamelCase: List[Any]=0.1 , __lowerCamelCase: int=0.1 , __lowerCamelCase: int=0.1 , __lowerCamelCase: Optional[int]=1e-5 , __lowerCamelCase: int=0.02 , __lowerCamelCase: Tuple=True , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Optional[int]=5_02_56 , __lowerCamelCase: List[Any]=5_02_56 , __lowerCamelCase: Union[str, Any]=False , __lowerCamelCase: Optional[Any]=False , **__lowerCamelCase: Any , ) -> str:
__UpperCAmelCase : Optional[Any] = state_dim
__UpperCAmelCase : Dict = act_dim
__UpperCAmelCase : str = hidden_size
__UpperCAmelCase : Dict = max_ep_len
__UpperCAmelCase : List[Any] = action_tanh
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : Union[str, Any] = n_positions
__UpperCAmelCase : Tuple = n_layer
__UpperCAmelCase : Union[str, Any] = n_head
__UpperCAmelCase : Tuple = n_inner
__UpperCAmelCase : Tuple = activation_function
__UpperCAmelCase : int = resid_pdrop
__UpperCAmelCase : Dict = embd_pdrop
__UpperCAmelCase : str = attn_pdrop
__UpperCAmelCase : List[str] = layer_norm_epsilon
__UpperCAmelCase : Optional[int] = initializer_range
__UpperCAmelCase : str = scale_attn_weights
__UpperCAmelCase : int = use_cache
__UpperCAmelCase : Tuple = scale_attn_by_inverse_layer_idx
__UpperCAmelCase : Union[str, Any] = reorder_and_upcast_attn
__UpperCAmelCase : Optional[Any] = bos_token_id
__UpperCAmelCase : Any = eos_token_id
super().__init__(bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
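# Illustrative usage (upstream this class is `DecisionTransformerConfig`; here
# it is obfuscated to `_snake_case`):
#     config = DecisionTransformerConfig(state_dim=17, act_dim=6, max_ep_len=4096)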
| 342 | import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class _snake_case ( _lowercase ):
lowerCamelCase__: Any = ["image_processor", "tokenizer"]
lowerCamelCase__: Optional[Any] = "BlipImageProcessor"
lowerCamelCase__: Optional[int] = "AutoTokenizer"
def __init__( self: List[str] , __lowerCamelCase: str , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[Any] ) -> Dict:
super().__init__(__lowerCamelCase , __lowerCamelCase )
# add QFormer tokenizer
__UpperCAmelCase : Dict = qformer_tokenizer
def __call__( self: Any , __lowerCamelCase: ImageInput = None , __lowerCamelCase: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCamelCase: bool = True , __lowerCamelCase: Union[bool, str, PaddingStrategy] = False , __lowerCamelCase: Union[bool, str, TruncationStrategy] = None , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: int = 0 , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = True , __lowerCamelCase: Optional[Union[str, TensorType]] = None , **__lowerCamelCase: Dict , ) -> BatchFeature:
if images is None and text is None:
raise ValueError("You have to specify at least images or text." )
__UpperCAmelCase : str = BatchFeature()
if text is not None:
__UpperCAmelCase : Any = self.tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
encoding.update(__lowerCamelCase )
__UpperCAmelCase : Dict = self.qformer_tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
__UpperCAmelCase : int = qformer_text_encoding.pop("input_ids" )
__UpperCAmelCase : Optional[int] = qformer_text_encoding.pop("attention_mask" )
if images is not None:
__UpperCAmelCase : Union[str, Any] = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase )
encoding.update(__lowerCamelCase )
return encoding
def _lowerCamelCase ( self: Any , *__lowerCamelCase: Any , **__lowerCamelCase: Any ) -> Optional[Any]:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( self: Tuple , *__lowerCamelCase: Any , **__lowerCamelCase: Dict ) -> Tuple:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _lowerCamelCase ( self: List[str] ) -> Tuple:
__UpperCAmelCase : str = self.tokenizer.model_input_names
__UpperCAmelCase : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Union[str, Any] , **__lowerCamelCase: Optional[Any] ) -> str:
if os.path.isfile(__lowerCamelCase ):
raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
__UpperCAmelCase : List[str] = os.path.join(__lowerCamelCase , "qformer_tokenizer" )
self.qformer_tokenizer.save_pretrained(__lowerCamelCase )
return super().save_pretrained(__lowerCamelCase , **__lowerCamelCase )
@classmethod
def _lowerCamelCase ( cls: Tuple , __lowerCamelCase: Tuple , **__lowerCamelCase: Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(__lowerCamelCase , subfolder="qformer_tokenizer" )
__UpperCAmelCase : List[Any] = cls._get_arguments_from_pretrained(__lowerCamelCase , **__lowerCamelCase )
args.append(__lowerCamelCase )
return cls(*__lowerCamelCase )
| 342 | 1 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: List[Any] ) -> Optional[int]:
__UpperCAmelCase : str = "ylacombe/bark-small"
__UpperCAmelCase : int = tempfile.mkdtemp()
__UpperCAmelCase : Tuple = "en_speaker_1"
__UpperCAmelCase : Optional[int] = "This is a test string"
__UpperCAmelCase : Optional[int] = "speaker_embeddings_path.json"
__UpperCAmelCase : Dict = "speaker_embeddings"
def _lowerCamelCase ( self: List[Any] , **__lowerCamelCase: Any ) -> List[Any]:
return AutoTokenizer.from_pretrained(self.checkpoint , **__lowerCamelCase )
def _lowerCamelCase ( self: List[str] ) -> Any:
shutil.rmtree(self.tmpdirname )
def _lowerCamelCase ( self: int ) -> Tuple:
__UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
__UpperCAmelCase : List[Any] = BarkProcessor(tokenizer=__lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
__UpperCAmelCase : Dict = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
__UpperCAmelCase : Optional[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
__UpperCAmelCase : Tuple = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__UpperCAmelCase : Any = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def _lowerCamelCase ( self: Optional[Any] ) -> List[Any]:
__UpperCAmelCase : Tuple = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
__UpperCAmelCase : Union[str, Any] = 35
__UpperCAmelCase : str = 2
__UpperCAmelCase : Union[str, Any] = 8
__UpperCAmelCase : Any = {
"semantic_prompt": np.ones(__lowerCamelCase ),
"coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ),
"fine_prompt": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
__UpperCAmelCase : Dict = processor(text=self.input_string , voice_preset=__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__lowerCamelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
__UpperCAmelCase : Tuple = os.path.join(self.tmpdirname , "file.npz" )
np.savez(__lowerCamelCase , **__lowerCamelCase )
__UpperCAmelCase : Optional[int] = processor(text=self.input_string , voice_preset=__lowerCamelCase )
__UpperCAmelCase : Any = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__lowerCamelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
__UpperCAmelCase : str = processor(text=self.input_string , voice_preset=self.voice_preset )
def _lowerCamelCase ( self: List[Any] ) -> List[str]:
__UpperCAmelCase : List[str] = self.get_tokenizer()
__UpperCAmelCase : Any = BarkProcessor(tokenizer=__lowerCamelCase )
__UpperCAmelCase : Optional[int] = processor(text=self.input_string )
__UpperCAmelCase : Any = tokenizer(
self.input_string , padding="max_length" , max_length=2_56 , add_special_tokens=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 342 | import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
_snake_case = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
_snake_case = {'''facebook/blenderbot-3B''': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _UpperCamelCase ( ) -> Dict:
__UpperCAmelCase : Tuple = (
list(range(ord("!" ), ord("~" ) + 1 ) ) + list(range(ord("¡" ), ord("¬" ) + 1 ) ) + list(range(ord("®" ), ord("ÿ" ) + 1 ) )
)
__UpperCAmelCase : str = bs[:]
__UpperCAmelCase : Any = 0
for b in range(2**8 ):
if b not in bs:
bs.append(snake_case__ )
cs.append(2**8 + n )
n += 1
__UpperCAmelCase : Optional[Any] = [chr(snake_case__ ) for n in cs]
return dict(zip(snake_case__, snake_case__ ) )
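# Printable bytes map to themselves; the remaining bytes are shifted past 255
# in increasing order, so byte 0x20 (space) becomes chr(256 + 32) == "Ġ", the
# familiar GPT-2/RoBERTa space marker.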
def _UpperCamelCase ( snake_case__ ) -> Any:
__UpperCAmelCase : List[Any] = set()
__UpperCAmelCase : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__UpperCAmelCase : Union[str, Any] = char
return pairs
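# e.g. get_pairs(("h", "e", "l", "l", "o")) -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}
# (the call-site name `get_pairs` survives the renaming; the def above is obfuscated)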
class _snake_case ( _lowercase ):
lowerCamelCase__: str = VOCAB_FILES_NAMES
lowerCamelCase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__: Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__: Dict = ["input_ids", "attention_mask"]
def __init__( self: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]="replace" , __lowerCamelCase: List[str]="<s>" , __lowerCamelCase: List[str]="</s>" , __lowerCamelCase: str="</s>" , __lowerCamelCase: Tuple="<s>" , __lowerCamelCase: Optional[int]="<unk>" , __lowerCamelCase: Any="<pad>" , __lowerCamelCase: List[str]="<mask>" , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int , ) -> List[str]:
__UpperCAmelCase : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
__UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
__UpperCAmelCase : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
__UpperCAmelCase : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
__UpperCAmelCase : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
__UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
__UpperCAmelCase : Dict = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , )
with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
__UpperCAmelCase : List[Any] = json.load(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = {v: k for k, v in self.encoder.items()}
__UpperCAmelCase : Dict = errors # how to handle errors in decoding
__UpperCAmelCase : Optional[int] = bytes_to_unicode()
__UpperCAmelCase : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle:
__UpperCAmelCase : List[Any] = merges_handle.read().split("\n" )[1:-1]
__UpperCAmelCase : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
__UpperCAmelCase : int = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
__UpperCAmelCase : List[Any] = {}
__UpperCAmelCase : Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__UpperCAmelCase : int = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def _lowerCamelCase ( self: Dict ) -> Any:
return len(self.encoder )
def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
return dict(self.encoder , **self.added_tokens_encoder )
def _lowerCamelCase ( self: int , __lowerCamelCase: List[Any] ) -> Union[str, Any]:
if token in self.cache:
return self.cache[token]
__UpperCAmelCase : List[Any] = tuple(__lowerCamelCase )
__UpperCAmelCase : Dict = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
__UpperCAmelCase : Optional[int] = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = bigram
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : str = 0
while i < len(__lowerCamelCase ):
try:
__UpperCAmelCase : Union[str, Any] = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCAmelCase : Union[str, Any] = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCAmelCase : List[Any] = tuple(__lowerCamelCase )
__UpperCAmelCase : str = new_word
if len(__lowerCamelCase ) == 1:
break
else:
__UpperCAmelCase : Optional[Any] = get_pairs(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = " ".join(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = word
return word
def _lowerCamelCase ( self: Dict , __lowerCamelCase: Optional[Any] ) -> Dict:
__UpperCAmelCase : Any = []
for token in re.findall(self.pat , __lowerCamelCase ):
__UpperCAmelCase : int = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) )
return bpe_tokens
def _lowerCamelCase ( self: int , __lowerCamelCase: str ) -> Dict:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[Any] ) -> List[str]:
return self.decoder.get(__lowerCamelCase )
def _lowerCamelCase ( self: Any , __lowerCamelCase: Any ) -> int:
__UpperCAmelCase : Dict = "".join(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCAmelCase : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__UpperCAmelCase : Dict = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" )
__UpperCAmelCase : Optional[Any] = 0
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
__UpperCAmelCase : Optional[Any] = token_index
writer.write(" ".join(__lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def _lowerCamelCase ( self: Dict , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None , __lowerCamelCase: bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase : int = [self.sep_token_id]
__UpperCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCamelCase ( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int ) -> List[Any]:
__UpperCAmelCase : Optional[Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()):
__UpperCAmelCase : Optional[Any] = " " + text
return (text, kwargs)
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[str]:
return token_ids_a + [self.eos_token_id]
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: "Conversation" ) -> List[int]:
__UpperCAmelCase : Tuple = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to prefix a space, as is done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = " ".join(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.encode(__lowerCamelCase )
if len(__lowerCamelCase ) > self.model_max_length:
__UpperCAmelCase : List[Any] = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 342 | 1 |
def _UpperCamelCase ( snake_case__, snake_case__ ) -> List[Any]:
__UpperCAmelCase : List[str] = ""
for i in table:
res += inp[i - 1]
return res
def _UpperCamelCase ( snake_case__ ) -> Union[str, Any]:
return data[1:] + data[0]
def _UpperCamelCase ( snake_case__, snake_case__ ) -> List[str]:
__UpperCAmelCase : List[Any] = ""
for i in range(len(snake_case__ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
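# Illustrative values for the helpers above (call-site names):
#   apply_table("1010", [2, 4, 3, 1]) -> "0011"   (the table is 1-indexed)
#   left_shift("10110") -> "01101"
#   xor("0101", "0011") -> "0110"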
def _UpperCamelCase ( snake_case__, snake_case__ ) -> str:
__UpperCAmelCase : int = int("0b" + data[0] + data[-1], 2 )
__UpperCAmelCase : str = int("0b" + data[1:3], 2 )
return bin(s[row][col] )[2:]
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ ) -> int:
__UpperCAmelCase : int = message[:4]
__UpperCAmelCase : Dict = message[4:]
__UpperCAmelCase : Tuple = apply_table(snake_case__, snake_case__ )
__UpperCAmelCase : Dict = xor(snake_case__, snake_case__ )
__UpperCAmelCase : Dict = apply_sbox(snake_case__, temp[:4] ) # noqa: E741
__UpperCAmelCase : List[str] = apply_sbox(snake_case__, temp[4:] )
__UpperCAmelCase : List[str] = "0" * (2 - len(snake_case__ )) + l # noqa: E741
__UpperCAmelCase : Optional[int] = "0" * (2 - len(snake_case__ )) + r
__UpperCAmelCase : Dict = apply_table(l + r, snake_case__ )
__UpperCAmelCase : List[Any] = xor(snake_case__, snake_case__ )
return temp + right
if __name__ == "__main__":
_snake_case = input('''Enter 10 bit key: ''')
_snake_case = input('''Enter 8 bit message: ''')
_snake_case = [6, 3, 7, 4, 8, 5, 10, 9]
_snake_case = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
_snake_case = [2, 4, 3, 1]
_snake_case = [2, 6, 3, 1, 4, 8, 5, 7]
_snake_case = [4, 1, 3, 5, 7, 2, 8, 6]
_snake_case = [4, 1, 2, 3, 2, 3, 4, 1]
_snake_case = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_snake_case = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_snake_case = apply_table(key, paa_table)
_snake_case = temp[:5]
_snake_case = temp[5:]
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = apply_table(left + right, pa_table)
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = apply_table(left + right, pa_table)
# encryption
_snake_case = apply_table(message, IP)
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = temp[4:] + temp[:4]
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = apply_table(temp, IP_inv)
print('''Cipher text is:''', CT)
# decryption
_snake_case = apply_table(CT, IP)
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = temp[4:] + temp[:4]
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = apply_table(temp, IP_inv)
print('''Plain text after decrypting is:''', PT)
| 342 | import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: List[Any] = CanineTokenizer
lowerCamelCase__: Optional[int] = False
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
super().setUp()
__UpperCAmelCase : Tuple = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowerCamelCase ( self: Union[str, Any] ) -> List[Any]:
return CanineTokenizer.from_pretrained("google/canine-s" )
def _lowerCamelCase ( self: Any , **__lowerCamelCase: List[Any] ) -> CanineTokenizer:
__UpperCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
__UpperCAmelCase : Optional[int] = 10_24
return tokenizer
@require_torch
def _lowerCamelCase ( self: List[str] ) -> int:
__UpperCAmelCase : Union[str, Any] = self.canine_tokenizer
__UpperCAmelCase : List[str] = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
__UpperCAmelCase : Dict = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
# fmt: on
__UpperCAmelCase : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Optional[Any] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
__UpperCAmelCase : Optional[Any] = self.canine_tokenizer
__UpperCAmelCase : Dict = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
__UpperCAmelCase : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , __lowerCamelCase )
self.assertIn("attention_mask" , __lowerCamelCase )
self.assertIn("token_type_ids" , __lowerCamelCase )
@require_torch
def _lowerCamelCase ( self: Any ) -> List[str]:
__UpperCAmelCase : Optional[Any] = self.canine_tokenizer
__UpperCAmelCase : int = [
"What's the weater?",
"It's about 25 degrees.",
]
__UpperCAmelCase : List[Any] = tokenizer(
text_target=__lowerCamelCase , max_length=32 , padding="max_length" , truncation=__lowerCamelCase , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
# safety check on max_len default value so we are sure the test works
__UpperCAmelCase : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__UpperCAmelCase : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCAmelCase : int = tempfile.mkdtemp()
__UpperCAmelCase : List[Any] = " He is very happy, UNwant\u00E9d,running"
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
__UpperCAmelCase : Tuple = tokenizer.__class__.from_pretrained(__lowerCamelCase )
__UpperCAmelCase : Dict = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
shutil.rmtree(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCAmelCase : List[Any] = tempfile.mkdtemp()
__UpperCAmelCase : Optional[int] = " He is very happy, UNwant\u00E9d,running"
__UpperCAmelCase : str = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
__UpperCAmelCase : Tuple = chr(0xE_0_0_7 )
additional_special_tokens.append(__lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
__UpperCAmelCase : Optional[int] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
__UpperCAmelCase : str = tokenizer.__class__.from_pretrained(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertIn(__lowerCamelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__UpperCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(__lowerCamelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__lowerCamelCase )
def _lowerCamelCase ( self: str ) -> Optional[int]:
__UpperCAmelCase : List[Any] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.get_clean_sequence(__lowerCamelCase )
# a special token for Canine can be defined as follows:
__UpperCAmelCase : int = 0xE_0_0_5
__UpperCAmelCase : Tuple = chr(__lowerCamelCase )
tokenizer.add_special_tokens({"cls_token": special_token} )
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , 1 )
__UpperCAmelCase : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : Dict = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : int = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , input_encoded + special_token_id )
__UpperCAmelCase : Optional[int] = tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
self.assertTrue(special_token not in decoded )
def _lowerCamelCase ( self: Optional[int] ) -> Optional[Any]:
__UpperCAmelCase : List[str] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : Optional[int] = chr(0xE_0_0_5 )
__UpperCAmelCase : List[str] = chr(0xE_0_0_6 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__lowerCamelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
__UpperCAmelCase : Tuple = tokenizer.tokenize(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , 1 )
self.assertEqual(len(__lowerCamelCase ) , 1 )
self.assertEqual(token_a[0] , __lowerCamelCase )
self.assertEqual(token_a[0] , __lowerCamelCase )
@require_tokenizers
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
__UpperCAmelCase : Any = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
__UpperCAmelCase : Union[str, Any] = 0xE_0_0_6
__UpperCAmelCase : int = chr(__lowerCamelCase )
__UpperCAmelCase : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__lowerCamelCase )
tokenizer.from_pretrained(__lowerCamelCase )
def _lowerCamelCase ( self: Dict ) -> List[str]:
__UpperCAmelCase : str = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
__UpperCAmelCase : Tuple = json.load(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
__UpperCAmelCase : Optional[int] = json.load(__lowerCamelCase )
# a special token for Canine can be defined as follows:
__UpperCAmelCase : Any = 0xE_0_0_6
__UpperCAmelCase : Union[str, Any] = chr(__lowerCamelCase )
__UpperCAmelCase : Dict = [new_token_a]
__UpperCAmelCase : int = [new_token_a]
with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__UpperCAmelCase : List[str] = tokenizer_class.from_pretrained(__lowerCamelCase , extra_ids=0 )
self.assertIn(__lowerCamelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
__UpperCAmelCase : List[Any] = 0xE_0_0_7
__UpperCAmelCase : List[Any] = chr(__lowerCamelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__UpperCAmelCase : str = [AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase )]
__UpperCAmelCase : Dict = tokenizer_class.from_pretrained(
__lowerCamelCase , additional_special_tokens=__lowerCamelCase , extra_ids=0 )
self.assertIn(__lowerCamelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : Optional[int] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : int = "hello world"
if self.space_between_special_tokens:
__UpperCAmelCase : Any = "[CLS] hello world [SEP]"
else:
__UpperCAmelCase : Union[str, Any] = input
__UpperCAmelCase : List[Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : Any = tokenizer.decode(__lowerCamelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__lowerCamelCase , [output, output.lower()] )
def _lowerCamelCase ( self: Dict ) -> Any:
__UpperCAmelCase : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : List[str] = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
__UpperCAmelCase : List[str] = "a"
__UpperCAmelCase : Any = ord(__lowerCamelCase )
for attr in attributes_list:
setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase )
setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase )
setattr(__lowerCamelCase , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [] )
__UpperCAmelCase : Tuple = 0xE_0_0_6
__UpperCAmelCase : Optional[Any] = chr(__lowerCamelCase )
setattr(__lowerCamelCase , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
pass
def _lowerCamelCase ( self: Any ) -> Any:
pass
def _lowerCamelCase ( self: Union[str, Any] ) -> Tuple:
pass
def _lowerCamelCase ( self: Optional[int] ) -> Any:
pass
def _lowerCamelCase ( self: List[str] ) -> str:
pass
def _lowerCamelCase ( self: Union[str, Any] ) -> Optional[int]:
pass
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
pass
def _lowerCamelCase ( self: str ) -> Tuple:
pass
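# The tests above rely on CANINE's character-level scheme, in which every token is a
# single Unicode codepoint and special tokens live in the Private Use Area (for
# example chr(0xE_0_0_5)). A minimal stdlib-only sketch of that mapping, independent
# of the tokenizer classes, so it is an illustration rather than the library's code:
def codepoint_encode(text: str) -> list:
    # every character maps to its Unicode codepoint id
    return [ord(char) for char in text]
def codepoint_decode(ids: list) -> str:
    # the inverse mapping: codepoint ids back to characters
    return "".join(chr(i) for i in ids)
assert codepoint_decode(codepoint_encode("hi" + chr(0xE_0_0_5))) == "hi" + chr(0xE_0_0_5)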
| 342 | 1 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: str = BlenderbotSmallTokenizer
lowerCamelCase__: Dict = False
def _lowerCamelCase ( self: int ) -> Union[str, Any]:
super().setUp()
__UpperCAmelCase : Union[str, Any] = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
__UpperCAmelCase : str = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
__UpperCAmelCase : Union[str, Any] = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
__UpperCAmelCase : Optional[Any] = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
__UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__lowerCamelCase ) )
def _lowerCamelCase ( self: Optional[int] , **__lowerCamelCase: int ) -> str:
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Optional[Any] ) -> Optional[Any]:
__UpperCAmelCase : Optional[Any] = "adapt act apte"
__UpperCAmelCase : int = "adapt act apte"
return input_text, output_text
def _lowerCamelCase ( self: List[Any] ) -> List[str]:
__UpperCAmelCase : int = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__UpperCAmelCase : Optional[Any] = "adapt act apte"
__UpperCAmelCase : Tuple = ["adapt", "act", "ap@@", "te"]
__UpperCAmelCase : str = tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : List[str] = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
__UpperCAmelCase : int = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )
def _lowerCamelCase ( self: Union[str, Any] ) -> str:
__UpperCAmelCase : Any = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
assert tok("sam" ).input_ids == [13_84]
__UpperCAmelCase : List[Any] = "I am a small frog."
__UpperCAmelCase : Optional[Any] = tok([src_text] , padding=__lowerCamelCase , truncation=__lowerCamelCase )["input_ids"]
__UpperCAmelCase : Dict = tok.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def _lowerCamelCase ( self: Any ) -> int:
__UpperCAmelCase : str = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
__UpperCAmelCase : List[str] = "I am a small frog ."
__UpperCAmelCase : Optional[Any] = "."
__UpperCAmelCase : Union[str, Any] = tok(__lowerCamelCase )["input_ids"]
__UpperCAmelCase : List[Any] = tok(__lowerCamelCase )["input_ids"]
assert encoded[-1] == encoded_dot[0]
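# A hedged usage sketch of the tokenizer exercised above; the checkpoint name comes
# from the tests themselves ("facebook/blenderbot-90M") and downloading it is assumed
# to be possible where this runs.
from transformers import BlenderbotSmallTokenizer
tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
ids = tok("I am a small frog.").input_ids
# decoding lower-cases and re-spaces punctuation, as the assertions above check
print(tok.decode(ids, skip_special_tokens=True, clean_up_tokenization_spaces=False))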
| 342 | import logging
import os
from .state import PartialState
class _snake_case ( logging.LoggerAdapter ):
@staticmethod
def _lowerCamelCase ( __lowerCamelCase: Any ) -> int:
__UpperCAmelCase : str = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[Any] , *__lowerCamelCase: List[str] , **__lowerCamelCase: List[Any] ) -> Optional[int]:
if PartialState._shared_state == {}:
raise RuntimeError(
"You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." )
__UpperCAmelCase : Any = kwargs.pop("main_process_only" , __lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = kwargs.pop("in_order" , __lowerCamelCase )
if self.isEnabledFor(__lowerCamelCase ):
if self._should_log(__lowerCamelCase ):
__UpperCAmelCase , __UpperCAmelCase : int = self.process(__lowerCamelCase , __lowerCamelCase )
self.logger.log(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
elif in_order:
__UpperCAmelCase : Optional[int] = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self.process(__lowerCamelCase , __lowerCamelCase )
self.logger.log(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
state.wait_for_everyone()
def _UpperCamelCase ( snake_case__, snake_case__ = None ) -> List[str]:
if log_level is None:
__UpperCAmelCase : List[Any] = os.environ.get("ACCELERATE_LOG_LEVEL", snake_case__ )
__UpperCAmelCase : Union[str, Any] = logging.getLogger(snake_case__ )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(snake_case__, {} )
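# A minimal usage sketch for the adapter defined above; the obfuscated factory at the
# end corresponds to `accelerate.logging.get_logger`. An `Accelerator()` (or
# `PartialState()`) must exist first, otherwise the adapter raises the RuntimeError
# shown above. `main_process_only` defaults to True in the released library.
from accelerate import Accelerator
from accelerate.logging import get_logger
accelerator = Accelerator()  # initializes the shared PartialState
logger = get_logger(__name__, log_level="INFO")
logger.info("printed once, on the main process only")
logger.info("printed by every rank, in launch order", main_process_only=False, in_order=True)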
| 342 | 1 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
_snake_case = ['''gpt2''']
_snake_case = '''gpt2'''
if is_tf_available():
class _snake_case ( tf.Module ):
def __init__( self: List[Any] , __lowerCamelCase: Dict ) -> str:
super().__init__()
__UpperCAmelCase : Union[str, Any] = tokenizer
__UpperCAmelCase : Optional[int] = AutoConfig.from_pretrained(__lowerCamelCase )
__UpperCAmelCase : Tuple = TFGPTaLMHeadModel.from_config(__lowerCamelCase )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="text" ),) )
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: str ) -> Union[str, Any]:
__UpperCAmelCase : Optional[Any] = self.tokenizer(__lowerCamelCase )
__UpperCAmelCase : List[Any] = tokenized["input_ids"].to_tensor()
__UpperCAmelCase : Tuple = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
__UpperCAmelCase : List[str] = self.model(input_ids=__lowerCamelCase , attention_mask=__lowerCamelCase )["logits"]
return outputs
@require_tf
@require_keras_nlp
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: int ) -> Dict:
super().setUp()
__UpperCAmelCase : List[str] = [GPTaTokenizer.from_pretrained(__lowerCamelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
__UpperCAmelCase : int = [TFGPTaTokenizer.from_pretrained(__lowerCamelCase ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
__UpperCAmelCase : Union[str, Any] = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
__UpperCAmelCase : List[str] = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def _lowerCamelCase ( self: Dict ) -> Dict:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
__UpperCAmelCase : Any = tokenizer([test_inputs] , return_tensors="tf" )
__UpperCAmelCase : Dict = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
__UpperCAmelCase : int = python_outputs[key].numpy()
__UpperCAmelCase : str = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(__lowerCamelCase , tf.intaa ) == tf_outputs_values ) )
@slow
def _lowerCamelCase ( self: int ) -> List[str]:
for tf_tokenizer in self.tf_tokenizers:
__UpperCAmelCase : int = tf.function(__lowerCamelCase )
for test_inputs in self.test_sentences:
__UpperCAmelCase : Any = tf.constant(__lowerCamelCase )
__UpperCAmelCase : Any = compiled_tokenizer(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = tf_tokenizer(__lowerCamelCase )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
for tf_tokenizer in self.tf_tokenizers:
__UpperCAmelCase : List[str] = ModelToSave(tokenizer=__lowerCamelCase )
__UpperCAmelCase : Any = tf.convert_to_tensor([self.test_sentences[0]] )
__UpperCAmelCase : List[Any] = model.serving(__lowerCamelCase ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
__UpperCAmelCase : Union[str, Any] = Path(__lowerCamelCase ) / "saved.model"
tf.saved_model.save(__lowerCamelCase , __lowerCamelCase , signatures={"serving_default": model.serving} )
__UpperCAmelCase : Dict = tf.saved_model.load(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = loaded_model.signatures["serving_default"](__lowerCamelCase )["output_0"]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def _lowerCamelCase ( self: Dict ) -> Tuple:
for tf_tokenizer in self.tf_tokenizers:
__UpperCAmelCase : Dict = tf.convert_to_tensor([self.test_sentences[0]] )
__UpperCAmelCase : Any = tf_tokenizer(__lowerCamelCase ) # Build model with some sample inputs
__UpperCAmelCase : Any = tf_tokenizer.get_config()
__UpperCAmelCase : Dict = TFGPTaTokenizer.from_config(__lowerCamelCase )
__UpperCAmelCase : str = model_from_config(__lowerCamelCase )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def _lowerCamelCase ( self: str ) -> str:
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
__UpperCAmelCase : int = 12_31_23
for max_length in [3, 5, 10_24]:
__UpperCAmelCase : Any = tf.convert_to_tensor([self.test_sentences[0]] )
__UpperCAmelCase : List[str] = tf_tokenizer(__lowerCamelCase , max_length=__lowerCamelCase )
__UpperCAmelCase : Dict = out["input_ids"].numpy().shape[1]
assert out_length == max_length
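# A hedged end-to-end sketch of what these tests exercise: an in-graph tokenizer
# (backed by keras-nlp) whose string-to-ids mapping runs inside TensorFlow, so it
# can be traced with tf.function and exported in a SavedModel together with the model.
import tensorflow as tf
from transformers.models.gpt2 import TFGPT2Tokenizer
tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")  # checkpoint from TOKENIZER_CHECKPOINTS
batch = tf_tokenizer(tf.constant(["This is a straightforward English test sentence."]))
print(batch["input_ids"])  # integer tensor produced entirely in-graph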
| 342 | from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _snake_case ( _lowercase ):
def __init__( self: Optional[Any] , __lowerCamelCase: NestedDataStructureLike[PathLike] , __lowerCamelCase: Optional[NamedSplit] = None , __lowerCamelCase: Optional[Features] = None , __lowerCamelCase: str = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: Optional[int] = None , **__lowerCamelCase: Tuple , ) -> str:
super().__init__(
__lowerCamelCase , split=__lowerCamelCase , features=__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase , streaming=__lowerCamelCase , num_proc=__lowerCamelCase , **__lowerCamelCase , )
__UpperCAmelCase : Union[str, Any] = path_or_paths if isinstance(__lowerCamelCase , __lowerCamelCase ) else {self.split: path_or_paths}
__UpperCAmelCase : int = Text(
cache_dir=__lowerCamelCase , data_files=__lowerCamelCase , features=__lowerCamelCase , **__lowerCamelCase , )
def _lowerCamelCase ( self: List[Any] ) -> Optional[Any]:
# Build iterable dataset
if self.streaming:
__UpperCAmelCase : List[str] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__UpperCAmelCase : Any = None
__UpperCAmelCase : Any = None
__UpperCAmelCase : Dict = None
__UpperCAmelCase : str = None
self.builder.download_and_prepare(
download_config=__lowerCamelCase , download_mode=__lowerCamelCase , verification_mode=__lowerCamelCase , base_path=__lowerCamelCase , num_proc=self.num_proc , )
__UpperCAmelCase : Dict = self.builder.as_dataset(
split=self.split , verification_mode=__lowerCamelCase , in_memory=self.keep_in_memory )
return dataset
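# The obfuscated reader above corresponds to `datasets.io.text.TextDatasetReader`,
# which backs `load_dataset("text", ...)`. A usage sketch; "lines.txt" is a
# hypothetical file with one example per line.
from datasets import load_dataset
ds = load_dataset("text", data_files="lines.txt", split="train")
print(ds[0]["text"])  # each line of the file becomes one example with a "text" column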
| 342 | 1 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _snake_case ( _lowercase ):
lowerCamelCase__: Dict = ["image_processor", "tokenizer"]
lowerCamelCase__: Union[str, Any] = "LayoutLMv3ImageProcessor"
lowerCamelCase__: Any = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
def __init__( self: Optional[Any] , __lowerCamelCase: str=None , __lowerCamelCase: Optional[Any]=None , **__lowerCamelCase: Any ) -> List[str]:
__UpperCAmelCase : List[str] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __lowerCamelCase , )
__UpperCAmelCase : str = kwargs.pop("feature_extractor" )
__UpperCAmelCase : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__lowerCamelCase , __lowerCamelCase )
def __call__( self: Optional[Any] , __lowerCamelCase: Tuple , __lowerCamelCase: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCamelCase: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __lowerCamelCase: Union[List[List[int]], List[List[List[int]]]] = None , __lowerCamelCase: Optional[Union[List[int], List[List[int]]]] = None , __lowerCamelCase: bool = True , __lowerCamelCase: Union[bool, str, PaddingStrategy] = False , __lowerCamelCase: Union[bool, str, TruncationStrategy] = None , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: int = 0 , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = True , __lowerCamelCase: Optional[Union[str, TensorType]] = None , **__lowerCamelCase: Union[str, Any] , ) -> BatchEncoding:
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
# first, apply the image processor
__UpperCAmelCase : List[str] = self.image_processor(images=__lowerCamelCase , return_tensors=__lowerCamelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Union[str, Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
__UpperCAmelCase : str = features["words"]
__UpperCAmelCase : str = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
# add pixel values
__UpperCAmelCase : int = features.pop("pixel_values" )
if return_overflowing_tokens is True:
__UpperCAmelCase : str = self.get_overflowing_images(__lowerCamelCase , encoded_inputs["overflow_to_sample_mapping"] )
__UpperCAmelCase : List[Any] = images
return encoded_inputs
def _lowerCamelCase ( self: int , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[Any] ) -> str:
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
__UpperCAmelCase : Tuple = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__lowerCamelCase ) != len(__lowerCamelCase ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
f''' {len(__lowerCamelCase )} and {len(__lowerCamelCase )}''' )
return images_with_overflow
def _lowerCamelCase ( self: int , *__lowerCamelCase: int , **__lowerCamelCase: Union[str, Any] ) -> List[str]:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( self: List[Any] , *__lowerCamelCase: str , **__lowerCamelCase: Tuple ) -> Tuple:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
def _lowerCamelCase ( self: Union[str, Any] ) -> List[Any]:
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def _lowerCamelCase ( self: Dict ) -> Optional[Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __lowerCamelCase , )
return self.image_processor_class
@property
def _lowerCamelCase ( self: Dict ) -> str:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __lowerCamelCase , )
return self.image_processor
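# A hedged usage sketch of the processor above. "microsoft/layoutlmv3-base" is the
# public checkpoint and "document.png" is a hypothetical input; with the default
# apply_ocr=True the image processor runs Tesseract OCR, so words and boxes must not
# be passed, exactly as the validation in __call__ enforces.
from PIL import Image
from transformers import LayoutLMv3Processor
processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
image = Image.open("document.png").convert("RGB")
encoding = processor(image, return_tensors="pt")
print(encoding.keys())  # input_ids, attention_mask, bbox, pixel_values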
| 342 | from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
'''configuration_trajectory_transformer''': [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TrajectoryTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrajectoryTransformerModel''',
'''TrajectoryTransformerPreTrainedModel''',
'''load_tf_weights_in_trajectory_transformer''',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
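# The module above defers its heavy imports until first attribute access. A minimal
# stdlib sketch of the same mechanism (not transformers' actual `_LazyModule`, just
# an illustration of what it does with `_import_structure`):
import importlib
import types
class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule name: [names it exports]} for O(1) lookup
        self._name_to_module = {n: m for m, names in import_structure.items() for n in names}
    def __getattr__(self, attr):
        try:
            module_name = self._name_to_module[attr]
        except KeyError:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value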
| 342 | 1 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_snake_case = pytest.mark.integration
@require_faiss
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: Union[str, Any] ) -> str:
__UpperCAmelCase : Optional[int] = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(__lowerCamelCase ) for x in np.arange(30 ).tolist()]} )
return dset
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
__UpperCAmelCase : int = dset.map(
lambda __lowerCamelCase , __lowerCamelCase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__lowerCamelCase , keep_in_memory=__lowerCamelCase )
__UpperCAmelCase : Tuple = dset.add_faiss_index("vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT )
__UpperCAmelCase , __UpperCAmelCase : Dict = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def _lowerCamelCase ( self: List[str] ) -> int:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__UpperCAmelCase , __UpperCAmelCase : Tuple = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
__UpperCAmelCase , __UpperCAmelCase : List[Any] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(__lowerCamelCase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def _lowerCamelCase ( self: List[str] ) -> Dict:
from elasticsearch import Elasticsearch
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
__UpperCAmelCase : int = {"acknowledged": True}
mocked_bulk.return_value = [(True, None)] * 30  # set, rather than call, the mock's return value
__UpperCAmelCase : Dict = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
__UpperCAmelCase : Any = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=__lowerCamelCase )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: List[str] ) -> Optional[int]:
import faiss
__UpperCAmelCase : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
__UpperCAmelCase : Dict = np.zeros(5 , dtype=np.floataa )
__UpperCAmelCase : List[str] = 1
__UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(__lowerCamelCase )
self.assertRaises(__lowerCamelCase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
__UpperCAmelCase : List[str] = np.eye(5 , dtype=np.floataa )[::-1]
__UpperCAmelCase , __UpperCAmelCase : Any = index.search_batch(__lowerCamelCase )
self.assertRaises(__lowerCamelCase , index.search_batch , queries[0] )
__UpperCAmelCase : Dict = [scores[0] for scores in total_scores]
__UpperCAmelCase : int = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , __lowerCamelCase )
def _lowerCamelCase ( self: Any ) -> List[str]:
import faiss
__UpperCAmelCase : Dict = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
__UpperCAmelCase : Optional[Any] = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__lowerCamelCase ):
__UpperCAmelCase : Any = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def _lowerCamelCase ( self: List[str] ) -> Dict:
import faiss
__UpperCAmelCase : str = faiss.IndexFlat(5 )
__UpperCAmelCase : int = FaissIndex(custom_index=__lowerCamelCase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def _lowerCamelCase ( self: Union[str, Any] ) -> int:
import faiss
__UpperCAmelCase : Any = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file:
index.save(tmp_file.name )
__UpperCAmelCase : List[str] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
__UpperCAmelCase : Tuple = np.zeros(5 , dtype=np.floataa )
__UpperCAmelCase : Tuple = 1
__UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search(__lowerCamelCase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def _UpperCamelCase ( snake_case__ ) -> Optional[Any]:
import faiss
__UpperCAmelCase : Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5, dtype=np.floataa ) )
__UpperCAmelCase : Optional[Any] = "index.faiss"
__UpperCAmelCase : Optional[int] = f'''mock://{index_name}'''
index.save(snake_case__, storage_options=mockfs.storage_options )
__UpperCAmelCase : Dict = FaissIndex.load(snake_case__, storage_options=mockfs.storage_options )
__UpperCAmelCase : str = np.zeros(5, dtype=np.floataa )
__UpperCAmelCase : Any = 1
__UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(snake_case__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
__UpperCAmelCase : Optional[Any] = Elasticsearch()
__UpperCAmelCase : Dict = {"acknowledged": True}
__UpperCAmelCase : Any = ElasticSearchIndex(es_client=__lowerCamelCase )
mocked_bulk.return_value = [(True, None)] * 3  # set, rather than call, the mock's return value
index.add_documents(["foo", "bar", "foobar"] )
# single query
__UpperCAmelCase : Dict = "foo"
__UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = index.search(__lowerCamelCase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
__UpperCAmelCase : int = "foo"
__UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search(__lowerCamelCase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
__UpperCAmelCase : int = ["foo", "bar", "foobar"]
__UpperCAmelCase : Union[str, Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
__UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search_batch(__lowerCamelCase )
__UpperCAmelCase : Tuple = [scores[0] for scores in total_scores]
__UpperCAmelCase : Optional[int] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __lowerCamelCase )
# batched queries with timeout
__UpperCAmelCase : str = ["foo", "bar", "foobar"]
__UpperCAmelCase : Tuple = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search_batch(__lowerCamelCase , request_timeout=30 )
__UpperCAmelCase : Union[str, Any] = [scores[0] for scores in total_scores]
__UpperCAmelCase : List[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __lowerCamelCase )
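# A hedged sketch of the FaissIndex flow covered above: build an inner-product index
# over unit vectors, then run a single 1-D query. Requires `faiss` to be installed;
# the vectors are toy data.
import faiss
import numpy as np
from datasets.search import FaissIndex
index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5, dtype=np.float32))
query = np.zeros(5, dtype=np.float32)
query[1] = 1.0
scores, ids = index.search(query)  # queries must be 1-D, as the tests assert
assert ids[0] == 1  # the matching basis vector comes back first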
| 342 | import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
def __init__( self: Tuple , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any]=13 , __lowerCamelCase: Optional[int]=32 , __lowerCamelCase: List[str]=3 , __lowerCamelCase: Dict=4 , __lowerCamelCase: Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase: int=[2, 2, 3, 2] , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Tuple=37 , __lowerCamelCase: Tuple="gelu" , __lowerCamelCase: List[Any]=10 , __lowerCamelCase: Optional[int]=0.02 , __lowerCamelCase: Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase: Optional[int]=[2, 3, 4] , __lowerCamelCase: int=None , ) -> List[str]:
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : Optional[int] = image_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : Union[str, Any] = num_stages
__UpperCAmelCase : List[str] = hidden_sizes
__UpperCAmelCase : Any = depths
__UpperCAmelCase : Optional[int] = is_training
__UpperCAmelCase : List[Any] = use_labels
__UpperCAmelCase : Optional[int] = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Union[str, Any] = num_labels
__UpperCAmelCase : Any = initializer_range
__UpperCAmelCase : List[str] = out_features
__UpperCAmelCase : Tuple = out_indices
__UpperCAmelCase : List[Any] = scope
def _lowerCamelCase ( self: List[Any] ) -> Optional[int]:
__UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : List[str] = None
if self.use_labels:
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self: Tuple ) -> List[Any]:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase : Optional[Any] = ConvNextVaModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : List[str] = model(__lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: Tuple ) -> Tuple:
__UpperCAmelCase : Union[str, Any] = ConvNextVaForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Optional[int] = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self: int , __lowerCamelCase: Any , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Any = model(__lowerCamelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__UpperCAmelCase : List[Any] = None
__UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Any = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowerCamelCase ( self: int ) -> List[str]:
__UpperCAmelCase : int = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs
__UpperCAmelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs
__UpperCAmelCase : Dict = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class _snake_case ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase__: Dict = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__: str = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__: Tuple = False
lowerCamelCase__: int = False
lowerCamelCase__: Dict = False
lowerCamelCase__: int = False
lowerCamelCase__: Any = False
def _lowerCamelCase ( self: Union[str, Any] ) -> Union[str, Any]:
__UpperCAmelCase : Union[str, Any] = ConvNextVaModelTester(self )
__UpperCAmelCase : str = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def _lowerCamelCase ( self: Dict ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self: List[Any] ) -> int:
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def _lowerCamelCase ( self: Any ) -> Any:
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def _lowerCamelCase ( self: str ) -> Optional[Any]:
pass
def _lowerCamelCase ( self: List[Any] ) -> int:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_with_labels()
__UpperCAmelCase : Optional[Any] = True
if model_class.__name__ in [
*get_values(__lowerCamelCase ),
*get_values(__lowerCamelCase ),
]:
continue
__UpperCAmelCase : Optional[Any] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
__UpperCAmelCase : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
__UpperCAmelCase : Any = model(**__lowerCamelCase ).loss
loss.backward()
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = True
if (
model_class.__name__
in [*get_values(__lowerCamelCase ), *get_values(__lowerCamelCase )]
or not model_class.supports_gradient_checkpointing
):
continue
__UpperCAmelCase : int = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.gradient_checkpointing_enable()
model.train()
__UpperCAmelCase : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
__UpperCAmelCase : Any = model(**__lowerCamelCase ).loss
loss.backward()
def _lowerCamelCase ( self: List[str] ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : str = model_class(__lowerCamelCase )
__UpperCAmelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : List[Any] = [*signature.parameters.keys()]
__UpperCAmelCase : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _lowerCamelCase ( self: str ) -> List[Any]:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowerCamelCase ( self: Union[str, Any] ) -> Dict:
def check_hidden_states_output(__lowerCamelCase: Any , __lowerCamelCase: Tuple , __lowerCamelCase: str ):
__UpperCAmelCase : Any = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Tuple = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
__UpperCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[int] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Any = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def _lowerCamelCase ( self: Dict ) -> List[Any]:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Optional[int] = ConvNextVaModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def _UpperCamelCase ( ) -> List[Any]:
__UpperCAmelCase : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
__UpperCAmelCase : List[Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCamelCase )
__UpperCAmelCase : List[str] = self.default_image_processor
__UpperCAmelCase : Optional[Any] = prepare_img()
__UpperCAmelCase : int = preprocessor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
__UpperCAmelCase : str = model(**__lowerCamelCase )
# verify the logits
__UpperCAmelCase : Dict = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
__UpperCAmelCase : str = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
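# A condensed, hedged version of the integration path above: classify one image with
# the public tiny checkpoint. "cats.png" is a hypothetical input file.
import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextV2ForImageClassification
processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
image = Image.open("cats.png").convert("RGB")
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(model.config.id2label[int(logits.argmax(-1))])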
| 342 | 1 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: Optional[int] = MgpstrTokenizer
lowerCamelCase__: List[Any] = False
lowerCamelCase__: Optional[int] = {}
lowerCamelCase__: Dict = False
def _lowerCamelCase ( self: int ) -> str:
super().setUp()
# fmt: off
__UpperCAmelCase : Dict = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
__UpperCAmelCase : int = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
__UpperCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCamelCase ) + "\n" )
def _lowerCamelCase ( self: Tuple , **__lowerCamelCase: Union[str, Any] ) -> Union[str, Any]:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: Dict ) -> List[str]:
__UpperCAmelCase : Optional[Any] = "tester"
__UpperCAmelCase : int = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
pass
def _lowerCamelCase ( self: str ) -> Any:
__UpperCAmelCase : Union[str, Any] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : List[str] = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
__UpperCAmelCase : Union[str, Any] = tokenizer.encode([special_token] , add_special_tokens=__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , 1 )
__UpperCAmelCase : Union[str, Any] = tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
self.assertTrue(special_token not in decoded )
def _lowerCamelCase ( self: Dict ) -> List[Any]:
__UpperCAmelCase : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.get_input_output_texts(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowerCamelCase )
__UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Any = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertNotEqual(len(__lowerCamelCase ) , 0 )
__UpperCAmelCase : int = tokenizer.decode(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(text_a.replace(" " , "" ) , __lowerCamelCase )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def _lowerCamelCase ( self: Tuple ) -> List[str]:
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def _lowerCamelCase ( self: List[Any] ) -> List[str]:
pass
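# The MGP-STR vocabulary above is a plain char-to-id json. A stdlib-only sketch of
# the encode/decode roundtrip those tests verify, rebuilt from the same fixture vocab:
vocab = {c: i for i, c in enumerate(["[GO]", "[s]"] + list("0123456789abcdefghijklmnopqrstuvwxyz"))}
inv_vocab = {i: c for c, i in vocab.items()}
ids = [vocab[c] for c in "tester"]
assert "".join(inv_vocab[i] for i in ids) == "tester"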
| 342 | import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _snake_case ( _lowercase ):
lowerCamelCase__: str = "detr"
lowerCamelCase__: Dict = ["past_key_values"]
lowerCamelCase__: str = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
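# Hypothetical usage sketch (not part of the original module): constructing a
# default config and checking the attribute_map aliases defined above. The
# asserts rely only on the defaults declared in __init__.
if __name__ == "__main__":
    config = DetrConfig()
    # "hidden_size" resolves to d_model, "num_attention_heads" to encoder_attention_heads
    assert config.hidden_size == config.d_model == 256
    assert config.num_attention_heads == config.encoder_attention_heads == 8
    print(config.to_dict()["model_type"])  # "detr"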
| 342 | 1 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
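# Illustrative note (not part of the original __init__): with the _LazyModule
# registered in sys.modules above, a downstream import such as
#
#     from transformers.models.cpmant import CpmAntConfig
#
# only loads configuration_cpmant on first attribute access, keeping a bare
# `import transformers` cheap.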
| 342 | from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
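# Quick sanity check for jax_cosine_distance (illustrative, not in the original
# file): identical unit vectors should score ~1.0, orthogonal ones ~0.0.
if __name__ == "__main__":
    _a = jnp.array([[1.0, 0.0]])
    _b = jnp.array([[1.0, 0.0], [0.0, 1.0]])
    print(jax_cosine_distance(_a, _b))  # ~[[1. 0.]]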
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))
    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)

        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts
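# Worked example of the scoring above (illustrative numbers, not from the
# original file): with a learned concept threshold (weight) of 0.8 and a cosine
# similarity of 0.82, the score is 0.82 - 0.8 + 0.0 = 0.02 > 0, so the image is
# flagged. A prior special-care hit sets special_adjustment to 0.01, which
# effectively lowers that threshold to 0.79 for every concept.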
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]
        return random_params
    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 342 | 1 |