def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of ``n`` (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    max_number = 1
    i = 2
    # Trial division: strip out every prime factor i <= sqrt(n); whatever
    # remains (if > 1) is itself prime and therefore the largest factor.
    while i * i <= n:
        while n % i == 0:
            max_number = i
            n //= i
        i += 1
    if n > 1:
        max_number = n
    return int(max_number)


if __name__ == "__main__":
    print(f"{solution() = }")
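    # Sanity check on the worked example from the problem statement: 13195
    # factors as 5 * 7 * 13 * 29, so its largest prime factor is 29 (and the
    # default input's is 6857).
    print(f"{solution(13195) = }")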
import argparse
from typing import Dict

import tensorflow as tf
import torch
from tqdm import tqdm

from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration


INIT_COMMON = [
    # tf -> hf
    ("/", "."),
    ("layer_", "layers."),
    ("kernel", "weight"),
    ("beta", "bias"),
    ("gamma", "weight"),
    ("pegasus", "model"),
]
END_COMMON = [
    (".output.dense", ".fc2"),
    ("intermediate.LayerNorm", "final_layer_norm"),
    ("intermediate.dense", "fc1"),
]

DECODER_PATTERNS = (
    INIT_COMMON
    + [
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.out_proj"),
        ("attention.self", "self_attn"),
        ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
        ("attention.encdec_output.dense", "encoder_attn.out_proj"),
        ("attention.encdec", "encoder_attn"),
        ("key", "k_proj"),
        ("value", "v_proj"),
        ("query", "q_proj"),
        ("decoder.LayerNorm", "decoder.layernorm_embedding"),
    ]
    + END_COMMON
)

REMAINING_PATTERNS = (
    INIT_COMMON
    + [
        ("embeddings.word_embeddings", "shared.weight"),
        ("embeddings.position_embeddings", "embed_positions.weight"),
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.output"),
        ("attention.self", "self_attn.self"),
        ("encoder.LayerNorm", "encoder.layernorm_embedding"),
    ]
    + END_COMMON
)

KEYS_TO_IGNORE = [
    "encdec/key/bias",
    "encdec/query/bias",
    "encdec/value/bias",
    "self/key/bias",
    "self/query/bias",
    "self/value/bias",
    "encdec_output/dense/bias",
    "attention/output/dense/bias",
]


def rename_state_dict_key(k, patterns):
    # Apply each (tf_name, hf_name) substring substitution in order.
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
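# Illustrative trace (the TF key below is hypothetical): applying
# DECODER_PATTERNS in order, "pegasus/decoder/layer_0/attention/self/query/kernel"
#   -> "pegasus.decoder.layer_0.attention.self.query.kernel"  ("/" -> ".")
#   -> "model.decoder.layers.0.attention.self.query.weight"   (rest of INIT_COMMON)
#   -> "model.decoder.layers.0.self_attn.q_proj.weight"       (decoder-specific renames)
# The patterns are plain substring replacements, so their order matters.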
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(i in k for i in ["dense", "query", "key", "value"]):
            v = v.T  # dense/attention kernels are transposed between TF and PyTorch
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(i in k for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    # the encoder and decoder reuse the same position embeddings from the TF checkpoint
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
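# Example invocation (the script name and paths are placeholders):
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path /path/to/bigbird_pegasus_ckpt \
#       --save_dir ./bigbird-pegasus-pytorch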
import copy
import inspect
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
        TimesformerForVideoClassification,
        TimesformerModel,
    )
    from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from transformers import VideoMAEImageProcessor


class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)


def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
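# Note: these tests follow the usual transformers conventions; the fast tests run
# under plain pytest, while the @slow integration test above is skipped unless the
# RUN_SLOW=1 environment variable is set (see transformers.testing_utils.slow).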
import os
from typing import List, Optional, Union

from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer


class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            # tokenize the prompt twice: once for the language model, once for the Q-Former
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
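# A minimal usage sketch (the checkpoint name and the `image` variable are assumptions):
#
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
#   # `inputs` now holds pixel_values plus both tokenizations: input_ids /
#   # attention_mask for the language model and qformer_input_ids /
#   # qformer_attention_mask for the Q-Former.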
import inspect

import jax
import jax.lax as lax
import jax.numpy as jnp

from ..utils import add_start_docstrings
from ..utils.logging import get_logger


logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
            search or log softmax for each vocabulary token when using beam search
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.

    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.

"""


class FlaxLogitsProcessor:
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsProcessorList(list):
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores, cur_len, **kwargs):
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores


class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids, scores, cur_len):
        scores = scores / self.temperature
        return scores


class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids, scores, cur_len):
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
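# Worked example of the nucleus (top-p) mask above: with sorted softmax
# probabilities [0.5, 0.3, 0.15, 0.05] and top_p=0.9, the cumulative sums are
# [0.5, 0.8, 0.95, 1.0], so `cumulative_probs < top_p` keeps the first two
# tokens; rolling the mask by one position re-admits the token that crosses the
# threshold, leaving the 0.5/0.3/0.15 tokens and masking the rest to filter_value.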
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids, scores, cur_len):
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores


class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids, scores, cur_len):
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids, scores, cur_len):
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores


class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids, scores, cur_len):
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)

        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)

        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, suppress_tokens):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids, scores, cur_len):
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores


class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids, scores, cur_len):
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If cur_len is beyond the forced-token map, the processor does nothing.
            lambda: scores,
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (non-negative) tokens are forced.
                lambda: _force_token(cur_len),
                lambda: scores,
            ),
        )
        return scores


class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1

        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
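# A minimal sketch of composing these processors during sampling (the vocab size
# and token ids below are made up):
#
#   processors = FlaxLogitsProcessorList(
#       [FlaxMinLengthLogitsProcessor(min_length=5, eos_token_id=2),
#        FlaxTemperatureLogitsWarper(0.7)]
#   )
#   scores = processors(input_ids=jnp.zeros((1, 4), dtype=jnp.int32),
#                       scores=jnp.zeros((1, 32000)), cur_len=4)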
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
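# For example, get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}: the set of adjacent symbol pairs.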
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
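    # Each iteration of bpe() merges every occurrence of the best-ranked
    # (earliest-learned) bigram; e.g. if ("l", "l") is the best-ranked pair
    # present in "hello", the word ("h", "e", "l", "l", "o") becomes
    # ("h", "e", "ll", "o") before the ranks are consulted again.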
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
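
# A minimal usage sketch (checkpoint name taken from the vocab map above):
#
#   tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
#   ids = tokenizer("Hello, how are you?").input_ids
#   # build_inputs_with_special_tokens appends only </s>; Blenderbot adds no BOS token.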
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
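
# A minimal sketch (the default backbone is the ResNet config constructed above):
#
#   config = UperNetConfig()   # logs that it falls back to the ResNet backbone
#   d = config.to_dict()       # `backbone_config` is serialized as a nested dict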
| 362 | import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: List[Any] = CanineTokenizer
lowerCamelCase__: Optional[int] = False
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
super().setUp()
__UpperCAmelCase : Tuple = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowerCamelCase ( self: Union[str, Any] ) -> List[Any]:
return CanineTokenizer.from_pretrained("google/canine-s" )
def _lowerCamelCase ( self: Any , **__lowerCamelCase: List[Any] ) -> CanineTokenizer:
__UpperCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
__UpperCAmelCase : Optional[int] = 10_24
return tokenizer
@require_torch
def _lowerCamelCase ( self: List[str] ) -> int:
__UpperCAmelCase : Union[str, Any] = self.canine_tokenizer
__UpperCAmelCase : List[str] = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
__UpperCAmelCase : Dict = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
# fmt: on
__UpperCAmelCase : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Optional[Any] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
__UpperCAmelCase : Optional[Any] = self.canine_tokenizer
__UpperCAmelCase : Dict = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
__UpperCAmelCase : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , __lowerCamelCase )
self.assertIn("attention_mask" , __lowerCamelCase )
self.assertIn("token_type_ids" , __lowerCamelCase )
@require_torch
def _lowerCamelCase ( self: Any ) -> List[str]:
__UpperCAmelCase : Optional[Any] = self.canine_tokenizer
__UpperCAmelCase : int = [
"What's the weater?",
"It's about 25 degrees.",
]
__UpperCAmelCase : List[Any] = tokenizer(
text_target=__lowerCamelCase , max_length=32 , padding="max_length" , truncation=__lowerCamelCase , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
# safety check on max_len default value so we are sure the test works
__UpperCAmelCase : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__UpperCAmelCase : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCAmelCase : int = tempfile.mkdtemp()
__UpperCAmelCase : List[Any] = " He is very happy, UNwant\u00E9d,running"
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
__UpperCAmelCase : Tuple = tokenizer.__class__.from_pretrained(__lowerCamelCase )
__UpperCAmelCase : Dict = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
shutil.rmtree(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCAmelCase : List[Any] = tempfile.mkdtemp()
__UpperCAmelCase : Optional[int] = " He is very happy, UNwant\u00E9d,running"
__UpperCAmelCase : str = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
__UpperCAmelCase : Tuple = chr(0xE_0_0_7 )
additional_special_tokens.append(__lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
__UpperCAmelCase : Optional[int] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
__UpperCAmelCase : str = tokenizer.__class__.from_pretrained(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertIn(__lowerCamelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__UpperCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(__lowerCamelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__lowerCamelCase )
def _lowerCamelCase ( self: str ) -> Optional[int]:
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}'''):
                input_text, ids = self.get_clean_sequence(tokenizer)

                # a special token for Canine can be defined as follows:
                SPECIAL_TOKEN = 0xE005
                special_token = chr(SPECIAL_TOKEN)

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)

                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, input_encoded + special_token_id)

                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
def _lowerCamelCase ( self: Optional[int] ) -> Optional[Any]:
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}'''):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)

                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]})

                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)

                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)
@require_tokenizers
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}'''):
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token = chr(NEW_TOKEN)

                new_token = AddedToken(new_token, lstrip=True)
                tokenizer.add_special_tokens({"additional_special_tokens": [new_token]})

                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)
def _lowerCamelCase ( self: Dict ) -> List[str]:
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token_1 = chr(NEW_TOKEN)

                special_tokens_map["additional_special_tokens"] = [new_token_1]
                tokenizer_config["additional_special_tokens"] = [new_token_1]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_1],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])
                    ),
                )

                NEW_TOKEN = 0xE007
                new_token_2 = chr(NEW_TOKEN)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_2, lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0
                )

                self.assertIn(new_token_2, tokenizer.additional_special_tokens)
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2]))
                )
@require_tokenizers
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}'''):
                input = "hello world"
                if self.space_between_special_tokens:
                    output = "[CLS] hello world [SEP]"
                else:
                    output = input
                encoded = tokenizer.encode(input, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])
def _lowerCamelCase ( self: Dict ) -> Any:
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}'''):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token = "a"
                token_id = ord(token)

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id)
                    self.assertEqual(getattr(tokenizer, attr), token)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, "additional_special_tokens_ids", [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [additional_special_token])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [additional_special_token_id])
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
pass
def _lowerCamelCase ( self: Any ) -> Any:
pass
def _lowerCamelCase ( self: Union[str, Any] ) -> Tuple:
pass
def _lowerCamelCase ( self: Optional[int] ) -> Any:
pass
def _lowerCamelCase ( self: List[str] ) -> str:
pass
def _lowerCamelCase ( self: Union[str, Any] ) -> Optional[int]:
pass
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
pass
def _lowerCamelCase ( self: str ) -> Tuple:
pass
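
# Standalone illustration (not part of the test class above): CANINE has no vocabulary
# file because every Unicode code point is its own token id, which is why the tests mint
# "new" special tokens from code points in the Basic Multilingual Plane's private use area.
PRIVATE_USE_EXAMPLE = chr(0xE005)
assert 0xE000 <= ord(PRIVATE_USE_EXAMPLE) <= 0xF8FF  # U+E000..U+F8FF is the BMP private use area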
| 342 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
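
# Hedged sanity check (illustrative helper, not in the original conversion script):
# "deta-swin-large" is one of the two model names the CLI below accepts, and the COCO
# variant should end up with 91 labels and a consistent id2label/label2id round-trip.
def _sanity_check_config():
    cfg = get_deta_config("deta-swin-large")
    assert cfg.num_labels == 91
    assert cfg.label2id[cfg.id2label[1]] == 1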
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
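
# Tiny behavioral note (assumed demo, not from the original file): rename_key mutates
# the state dict in place, moving the value from the old key to the new one.
def _rename_key_demo():
    d = {"old": 1}
    rename_key(d, "old", "new")
    assert d == {"new": 1}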
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
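
# Toy illustration (assumption: hidden size 4) of the fused-QKV split used above: the
# first third of the in-projection is the query, the middle third the key, the last the value.
def _qkv_split_demo(hidden_size=4):
    w = torch.arange(3 * hidden_size * hidden_size).reshape(3 * hidden_size, hidden_size)
    q, k, v = w[:hidden_size, :], w[hidden_size : hidden_size * 2, :], w[-hidden_size:, :]
    assert torch.equal(torch.cat([q, k, v]), w)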
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 363 | import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        "Check if log should be performed"
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` "
                "before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)

            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name, log_level=None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
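
# Hedged usage sketch (not part of this module; assumes an Accelerator or PartialState
# has been constructed first, as the RuntimeError above enforces):
#
#   from accelerate import Accelerator
#   from accelerate.logging import get_logger
#
#   accelerator = Accelerator()
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("printed once", main_process_only=True)
#   logger.info("printed per rank, in order", main_process_only=False, in_order=True)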
| 342 | 0 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class _snake_case ( unittest.TestCase ):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
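
# Hedged note (editorial, not part of the test class): the multi-GPU case shells out to
# the equivalent of `torchrun --nproc_per_node=<n_gpus> .../external_deps/test_metrics.py`,
# so test_metrics.py must also be runnable as a standalone entry point.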
| 364 | from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
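
# Hedged usage sketch (the file name is illustrative; this mirrors what
# `load_dataset("text", ...)` does under the hood via this reader):
#
#   from datasets import NamedSplit
#   ds = TextDatasetReader("notes.txt", split=NamedSplit("train"), keep_in_memory=True).read()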
| 342 | 0 |
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index

    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
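
# Toy demonstration (not in the original utility) of how the "init" pattern rewrites a
# version line; handy for sanity-checking the regexes above.
def _demo_init_pattern():
    re_pattern, replace = REPLACE_PATTERNS["init"]
    out = re_pattern.sub(replace.replace("VERSION", "4.28.0"), '__version__ = "4.28.0.dev0"\n')
    assert '__version__ = "4.28.0"' in out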
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 365 | from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_trajectory_transformer''': [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TrajectoryTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrajectoryTransformerModel''',
'''TrajectoryTransformerPreTrainedModel''',
'''load_tf_weights_in_trajectory_transformer''',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
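    # Design note (editorial): _LazyModule defers the torch-dependent imports above until
    # an attribute is first accessed, e.g. (illustrative):
    #   from transformers.models.trajectory_transformer import TrajectoryTransformerConfig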
| 342 | 0 |
from datetime import datetime
import requests
def download_video(url):
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, '''wb''') as fp:
fp.write(download_video(url))
print(F'Done. Video saved to disk as {file_name}.')
| 366 | import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
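
# Illustrative arithmetic check (assumes the tester defaults above): a stride-4 stem
# followed by three stride-2 downsamples reduces a 32x32 input to 32 // 32 = 1 per side,
# which is why create_and_check_model expects image_size // 32.
def _expected_final_resolution(image_size=32, num_stages=4):
    return image_size // (4 * 2 ** (num_stages - 1))


assert _expected_final_resolution() == 1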
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 342 | 0 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
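
# Quick self-contained demo (assumed, not in the original module) of get_relative_imports
# on a synthetic file; only the two relative-import forms above are picked up.
def _relative_imports_demo():
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
        f.write("import os\nfrom .utils import helper\n")
    assert get_relative_imports(f.name) == ["utils"]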
def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports


def check_imports(filename):
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path,
    module_file,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path,
    module_file,
    class_name=None,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
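# Illustrative usage sketch (not part of the original module; the repo id, file
# name, and class name below are hypothetical):
#
#   pipeline_cls = get_class_from_dynamic_module(
#       "my-user/my-community-pipeline",  # hypothetical Hub repo
#       "pipeline.py",
#       class_name="MyPipeline",
#   )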
| 367 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
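# Export sketch (illustrative addition, not in the original file): the ONNX config
# above plugs into `transformers.onnx.export`; the preprocessor checkpoint and the
# output path are assumptions.
#
#   from pathlib import Path
#   from transformers import AutoImageProcessor, DetrModel
#   from transformers.onnx import export
#
#   config = DetrConfig()
#   onnx_config = DetrOnnxConfig(config)
#   # export(AutoImageProcessor.from_pretrained("facebook/detr-resnet-50"),
#   #        DetrModel(config), onnx_config, onnx_config.default_onnx_opset,
#   #        Path("detr.onnx"))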
| 342 | 0 |
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""
def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
| 368 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
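# Quick sanity sketch (illustrative): rows are L2-normalized before the matmul,
# so the result is a cosine-similarity matrix and identical inputs score 1.0.
#
#   emb = jnp.ones((2, 4))
#   sims = jax_cosine_distance(emb, emb)  # shape (2, 2), all entries == 1.0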
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)
        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )
        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01
        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)
        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        random_params = self.module.init(rngs, clip_input)["params"]
        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))
        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 342 | 0 |
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, '''r''', encoding='''utf-8''') as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)
        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]
        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 369 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024
    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)
    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[F'upernet-convnext-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
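# Example invocation (script name and paths are placeholders):
#   python convert_upernet_checkpoint.py --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny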
| 342 | 0 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
__UpperCAmelCase : Optional[int] = value
elif weight_type == "weight_g":
__UpperCAmelCase : Optional[int] = value
elif weight_type == "weight_v":
__UpperCAmelCase : List[str] = value
elif weight_type == "bias":
__UpperCAmelCase : int = value
else:
__UpperCAmelCase : Union[str, Any] = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")
    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
                adapter.proj_layer_norm.bias.data = value
logger.info(f'''Adapter proj layer norm bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
                adapter.proj_layer_norm.weight.data = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
                adapter.proj.bias.data = value
logger.info(f'''Adapter proj layer bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
                adapter.proj.weight.data = value
logger.info(f'''Adapter proj layer weight was initialized from {full_name}.''' )
    elif isinstance(layer_id, int):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
            adapter.layers[layer_id].conv.bias.data = value
logger.info(f'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
            adapter.layers[layer_id].conv.weight.data = value
logger.info(f'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
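# Illustrative use (not in the original script): fairseq decoders typically tie
# the output projection to the token embedding, which this helper reproduces:
#
#   lm_head = make_linear_from_emb(hf_decoder.model.decoder.embed_tokens)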
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)
    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)
    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False
    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id
    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-xls-r-1b''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/mbart-large-50-one-to-many-mmt''',
type=str,
help='''Path to hf decoder checkpoint config''',
)
parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whethere to add model adapter layers''')
parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''')
parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''')
parser.add_argument('''--encoder_output_dim''', default=1024, type=int, help='''encoder output dim''')
parser.add_argument('''--start_token_id''', default=250004, type=int, help='''`decoder_start_token_id` of model config''')
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
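# Example invocation (script name and paths are placeholders):
#   python convert_wav2vec2_mbart_checkpoint.py --checkpoint_path ./checkpoint.pt \
#       --dict_path ./dict.txt --config_yaml_path ./config.yaml \
#       --pytorch_dump_folder_path ./wav2vec2-mbart50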
| 370 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
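# Illustrative instantiation (not part of the original file):
#
#   config = RoCBertConfig(enable_shape=False)  # text-only variant, no glyph (shape) channel
#   assert config.model_type == "roc_bert"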
| 342 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 371 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()
    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0
    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")
    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")
    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")
    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")
    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")
    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")
    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")
    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name
    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model=base_model)] = val
    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilevit_config(mobilevit_name)
    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")
    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits
    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)
        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")
        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)
        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 342 | 0 |
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _snake_case ( __lowercase ):
lowerCamelCase__: List[Any] = (KDPMaDiscreteScheduler,)
lowerCamelCase__: str = 10
def _lowerCamelCase ( self: str , **__lowerCamelCase: int ) -> Union[str, Any]:
__UpperCAmelCase : Optional[Any] = {
"num_train_timesteps": 11_00,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**_a )
return config
def _lowerCamelCase ( self: List[str] ) -> Optional[int]:
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_a )
def _lowerCamelCase ( self: Union[str, Any] ) -> Union[str, Any]:
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=_a , beta_end=_a )
def _lowerCamelCase ( self: Union[str, Any] ) -> int:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_a )
def _lowerCamelCase ( self: int ) -> Tuple:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def _lowerCamelCase ( self: Dict ) -> int:
__UpperCAmelCase : List[str] = self.scheduler_classes[0]
__UpperCAmelCase : int = self.get_scheduler_config(prediction_type="v_prediction" )
__UpperCAmelCase : Dict = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
__UpperCAmelCase : Optional[int] = self.dummy_model()
__UpperCAmelCase : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
__UpperCAmelCase : List[str] = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
__UpperCAmelCase : Dict = scheduler.scale_model_input(_a , _a )
__UpperCAmelCase : int = model(_a , _a )
__UpperCAmelCase : Any = scheduler.step(_a , _a , _a )
__UpperCAmelCase : Dict = output.prev_sample
__UpperCAmelCase : List[Any] = torch.sum(torch.abs(_a ) )
__UpperCAmelCase : Optional[int] = torch.mean(torch.abs(_a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934e-07 ) < 1e-2
assert abs(result_mean.item() - 6.1112e-10 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.693_4286_5017_0972e-07 ) < 1e-2
assert abs(result_mean.item() - 0.00_02 ) < 1e-3
def _lowerCamelCase ( self: Optional[Any] ) -> Union[str, Any]:
if torch_device == "mps":
return
__UpperCAmelCase : str = self.scheduler_classes[0]
__UpperCAmelCase : List[Any] = self.get_scheduler_config()
__UpperCAmelCase : Optional[int] = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
__UpperCAmelCase : Optional[Any] = self.dummy_model()
__UpperCAmelCase : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
__UpperCAmelCase : str = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
__UpperCAmelCase : List[Any] = scheduler.scale_model_input(_a , _a )
__UpperCAmelCase : Dict = model(_a , _a )
__UpperCAmelCase : List[str] = scheduler.step(_a , _a , _a )
__UpperCAmelCase : Tuple = output.prev_sample
__UpperCAmelCase : str = torch.sum(torch.abs(_a ) )
__UpperCAmelCase : Dict = torch.mean(torch.abs(_a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
def _lowerCamelCase ( self: Optional[int] ) -> int:
if torch_device == "mps":
return
__UpperCAmelCase : Union[str, Any] = self.scheduler_classes[0]
__UpperCAmelCase : Any = self.get_scheduler_config()
__UpperCAmelCase : Dict = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps , device=_a )
__UpperCAmelCase : Tuple = self.dummy_model()
__UpperCAmelCase : Optional[Any] = self.dummy_sample_deter.to(_a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
__UpperCAmelCase : Any = scheduler.scale_model_input(_a , _a )
__UpperCAmelCase : Dict = model(_a , _a )
__UpperCAmelCase : Tuple = scheduler.step(_a , _a , _a )
__UpperCAmelCase : List[str] = output.prev_sample
__UpperCAmelCase : Dict = torch.sum(torch.abs(_a ) )
__UpperCAmelCase : Dict = torch.mean(torch.abs(_a ) )
if str(_a ).startswith("cpu" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
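# --- Added sketch (not part of the original tests) ---
# Minimal version of the loop contract exercised above, using the scheduler
# name as imported in this file; `unet` is an assumed callable returning a
# noise prediction shaped like `sample`.
def _denoise_sketch(unet, sample, num_inference_steps=10):
    scheduler = KDPMaDiscreteScheduler(num_train_timesteps=1100)
    scheduler.set_timesteps(num_inference_steps)
    sample = sample * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)  # sigma-dependent scaling
        noise_pred = unet(model_input, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample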
| 350 | import math
_snake_case = 10
_snake_case = 7
_snake_case = BALLS_PER_COLOUR * NUM_COLOURS
def _UpperCamelCase ( snake_case__ = 20 ) -> str:
__UpperCAmelCase : Optional[Any] = math.comb(snake_case__, snake_case__ )
__UpperCAmelCase : List[Any] = math.comb(NUM_BALLS - BALLS_PER_COLOUR, snake_case__ )
__UpperCAmelCase : Dict = NUM_COLOURS * (1 - missing_colour / total)
return f'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
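# --- Added note (not part of the original) ---
# By linearity of expectation, E[#colours drawn] = NUM_COLOURS * P(a fixed
# colour appears), and P(colour appears) = 1 - C(NUM_BALLS - BALLS_PER_COLOUR, n)
# / C(NUM_BALLS, n) -- exactly the quantity computed in solution(n) above.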
| 342 | 0 |
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'''--original_config_file''',
default=None,
type=str,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--scheduler_type''',
default='''pndm''',
type=str,
help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''',
)
parser.add_argument(
'''--pipeline_type''',
default=None,
type=str,
help=(
'''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''''
'''. If `None` pipeline will be automatically inferred.'''
),
)
parser.add_argument(
'''--image_size''',
default=None,
type=int,
help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--prediction_type''',
default=None,
type=str,
help=(
'''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'''
''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
parser.add_argument(
'''--stable_unclip''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''',
)
parser.add_argument(
'''--stable_unclip_prior''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''',
)
parser.add_argument(
'''--clip_stats_path''',
type=str,
help='''Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''',
required=False,
)
parser.add_argument(
'''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.'''
)
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--vae_path''',
type=str,
default=None,
required=False,
help='''Set to a path, hub id to an already converted vae to not convert it again.''',
)
_snake_case = parser.parse_args()
_snake_case = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 351 | def _UpperCamelCase ( snake_case__ ) -> int:
__UpperCAmelCase : int = [0] * len(snake_case__ )
__UpperCAmelCase : Union[str, Any] = []
__UpperCAmelCase : str = [1] * len(snake_case__ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(snake_case__ ) ):
if indegree[i] == 0:
queue.append(snake_case__ )
while queue:
__UpperCAmelCase : List[str] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
__UpperCAmelCase : str = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(snake_case__ )
print(max(snake_case__ ) )
# Adjacency list of Graph
_snake_case = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
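# --- Added note (not part of the original) ---
# For the adjacency list above the longest chain is 0 -> 2 -> 5 -> 6 -> 7, so
# longest_distance(graph) prints 5 (distances are counted in vertices, with
# every source starting at 1).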
| 342 | 0 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class _snake_case ( __snake_case , unittest.TestCase ):
lowerCamelCase__: Union[str, Any] = CpmAntTokenizer
lowerCamelCase__: str = False
def _lowerCamelCase ( self: List[str] ) -> int:
super().setUp()
__UpperCAmelCase : Optional[int] = [
'''<d>''',
'''</d>''',
'''<s>''',
'''</s>''',
'''</_>''',
'''<unk>''',
'''<pad>''',
'''</n>''',
'''我''',
'''是''',
'''C''',
'''P''',
'''M''',
'''A''',
'''n''',
'''t''',
]
__UpperCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
@tooslow
def _lowerCamelCase ( self: str ) -> List[str]:
__UpperCAmelCase : str = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b" )
__UpperCAmelCase : Tuple = '''今天天气真好!'''
__UpperCAmelCase : int = ['''今天''', '''天气''', '''真''', '''好''', '''!''']
__UpperCAmelCase : int = tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
__UpperCAmelCase : List[Any] = '''今天天气真好!'''
__UpperCAmelCase : Dict = [tokenizer.bos_token] + tokens
__UpperCAmelCase : List[str] = [6, 98_02, 1_49_62, 20_82, 8_31, 2_44]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , a_ )
__UpperCAmelCase : Optional[Any] = tokenizer.decode(a_ )
self.assertEqual(a_ , a_ )
| 352 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
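# --- Added note (not part of the original) ---
# The _LazyModule indirection above defers the heavy framework imports:
# accessing e.g. the Whisper modeling classes triggers the torch-dependent
# submodule import on first use, while config/tokenizer lookups stay cheap.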
| 342 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class _snake_case ( SCREAMING_SNAKE_CASE_ ):
lowerCamelCase__: Optional[Any] = "luke"
def __init__( self: List[Any] , __lowerCamelCase: Dict=5_02_67 , __lowerCamelCase: Optional[int]=50_00_00 , __lowerCamelCase: List[str]=7_68 , __lowerCamelCase: Union[str, Any]=2_56 , __lowerCamelCase: List[Any]=12 , __lowerCamelCase: Optional[Any]=12 , __lowerCamelCase: str=30_72 , __lowerCamelCase: Optional[Any]="gelu" , __lowerCamelCase: int=0.1 , __lowerCamelCase: int=0.1 , __lowerCamelCase: str=5_12 , __lowerCamelCase: List[Any]=2 , __lowerCamelCase: List[Any]=0.02 , __lowerCamelCase: int=1e-12 , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Any=None , __lowerCamelCase: Tuple=1 , __lowerCamelCase: List[str]=0 , __lowerCamelCase: Optional[Any]=2 , **__lowerCamelCase: Optional[Any] , ) -> List[Any]:
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
__UpperCAmelCase : Union[str, Any] = vocab_size
__UpperCAmelCase : Any = entity_vocab_size
__UpperCAmelCase : Optional[Any] = hidden_size
__UpperCAmelCase : Union[str, Any] = entity_emb_size
__UpperCAmelCase : int = num_hidden_layers
__UpperCAmelCase : Union[str, Any] = num_attention_heads
__UpperCAmelCase : str = hidden_act
__UpperCAmelCase : Optional[int] = intermediate_size
__UpperCAmelCase : Any = hidden_dropout_prob
__UpperCAmelCase : str = attention_probs_dropout_prob
__UpperCAmelCase : Tuple = max_position_embeddings
__UpperCAmelCase : int = type_vocab_size
__UpperCAmelCase : List[str] = initializer_range
__UpperCAmelCase : Union[str, Any] = layer_norm_eps
__UpperCAmelCase : Any = use_entity_aware_attention
__UpperCAmelCase : Union[str, Any] = classifier_dropout
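# --- Added usage sketch (not part of the original; assumes the config class
# above keeps its public name LukeConfig) ---
#   config = LukeConfig(entity_vocab_size=500_000, use_entity_aware_attention=True)
#   config.save_pretrained("./luke-config")  # writes config.json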
| 353 | from __future__ import annotations
from math import pi
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
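# --- Added example (not part of the original; the helper above is shown under
# a generated name, so values are given directly) ---
# With inductance = 35e-6 H, frequency = 1_000 Hz and reactance = 0, the third
# branch applies: {"reactance": 2 * pi * 1_000 * 35e-6} ~= {"reactance": 0.22}.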
| 342 | 0 |
def _UpperCamelCase ( snake_case__, snake_case__ ) -> int:
while b:
__UpperCAmelCase , __UpperCAmelCase : int = b, a % b
return a
def _UpperCamelCase ( snake_case__, snake_case__ ) -> int:
return a if b == 0 else euclidean_gcd_recursive(snake_case__, a % b )
def _UpperCamelCase ( ) -> Union[str, Any]:
print(f'''euclidean_gcd(3, 5) = {euclidean_gcd(3, 5 )}''' )
print(f'''euclidean_gcd(5, 3) = {euclidean_gcd(5, 3 )}''' )
print(f'''euclidean_gcd(1, 3) = {euclidean_gcd(1, 3 )}''' )
print(f'''euclidean_gcd(3, 6) = {euclidean_gcd(3, 6 )}''' )
print(f'''euclidean_gcd(6, 3) = {euclidean_gcd(6, 3 )}''' )
print(f'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5 )}''' )
print(f'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3 )}''' )
print(f'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3 )}''' )
print(f'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6 )}''' )
print(f'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3 )}''' )
if __name__ == "__main__":
main()
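# --- Added property check (not part of the original; uses the readable names
# that main() above already refers to) ---
def _gcd_agreement_sketch(trials: int = 100) -> None:
    import math
    import random

    for _ in range(trials):
        a, b = random.randint(0, 99), random.randint(1, 99)
        assert euclidean_gcd(a, b) == euclidean_gcd_recursive(a, b) == math.gcd(a, b)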
| 354 | import flax.linen as nn
import jax
import jax.numpy as jnp
class _snake_case ( nn.Module ):
lowerCamelCase__: int
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: Tuple ) -> Union[str, Any]:
__UpperCAmelCase : List[str] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Optional[Any] , __lowerCamelCase: Optional[int] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = hidden_states.shape
__UpperCAmelCase : Dict = jax.image.resize(
__lowerCamelCase , shape=(batch, height * 2, width * 2, channels) , method="nearest" , )
__UpperCAmelCase : Dict = self.conv(__lowerCamelCase )
return hidden_states
class _snake_case ( nn.Module ):
lowerCamelCase__: int
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: str ) -> Any:
__UpperCAmelCase : Optional[int] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Dict , __lowerCamelCase: str ) -> List[Any]:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
__UpperCAmelCase : Any = self.conv(__lowerCamelCase )
return hidden_states
class _snake_case ( nn.Module ):
lowerCamelCase__: int
lowerCamelCase__: int = None
lowerCamelCase__: float = 0.0
lowerCamelCase__: bool = None
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: str ) -> List[str]:
__UpperCAmelCase : str = self.in_channels if self.out_channels is None else self.out_channels
__UpperCAmelCase : Dict = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
__UpperCAmelCase : List[str] = nn.Conv(
__lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__UpperCAmelCase : Optional[Any] = nn.Dense(__lowerCamelCase , dtype=self.dtype )
__UpperCAmelCase : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
__UpperCAmelCase : Optional[Any] = nn.Dropout(self.dropout_prob )
__UpperCAmelCase : Tuple = nn.Conv(
__lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__UpperCAmelCase : Optional[int] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
__UpperCAmelCase : List[Any] = None
if use_nin_shortcut:
__UpperCAmelCase : Dict = nn.Conv(
__lowerCamelCase , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , )
def __call__( self: Tuple , __lowerCamelCase: Tuple , __lowerCamelCase: str , __lowerCamelCase: Union[str, Any]=True ) -> List[Any]:
__UpperCAmelCase : Dict = hidden_states
__UpperCAmelCase : int = self.norma(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = nn.swish(__lowerCamelCase )
__UpperCAmelCase : Tuple = self.conva(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.time_emb_proj(nn.swish(__lowerCamelCase ) )
__UpperCAmelCase : List[str] = jnp.expand_dims(jnp.expand_dims(__lowerCamelCase , 1 ) , 1 )
__UpperCAmelCase : List[str] = hidden_states + temb
__UpperCAmelCase : Union[str, Any] = self.norma(__lowerCamelCase )
__UpperCAmelCase : Tuple = nn.swish(__lowerCamelCase )
__UpperCAmelCase : str = self.dropout(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : List[str] = self.conva(__lowerCamelCase )
if self.conv_shortcut is not None:
__UpperCAmelCase : Optional[int] = self.conv_shortcut(__lowerCamelCase )
return hidden_states + residual
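# --- Added note (not part of the original) ---
# All three modules take NHWC tensors: the first doubles H and W with a
# nearest-neighbour resize before a 3x3 conv, the second halves them with a
# stride-2 conv, and the residual block keeps the spatial shape, adding the
# (optionally 1x1-projected) input back at the end.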
| 342 | 0 |
def _UpperCamelCase ( snake_case__ ) -> Union[str, Any]:
    __UpperCAmelCase : str = (1 + 24 * snake_case__) ** 0.5
return ((1 + root) / 6) % 1 == 0
def _UpperCamelCase ( snake_case_ = 5000 ) -> Optional[int]:
__UpperCAmelCase : int = [(i * (3 * i - 1)) // 2 for i in range(1, snake_case_ )]
for i, pentagonal_i in enumerate(snake_case_ ):
for j in range(snake_case_, len(snake_case_ ) ):
__UpperCAmelCase : Union[str, Any] = pentagonal_nums[j]
__UpperCAmelCase : Dict = pentagonal_i + pentagonal_j
__UpperCAmelCase : List[Any] = pentagonal_j - pentagonal_i
if is_pentagonal(snake_case_ ) and is_pentagonal(snake_case_ ):
return b
return -1
if __name__ == "__main__":
print(F'{solution() = }')
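# --- Added note (not part of the original) ---
# is_pentagonal inverts P(n) = n(3n - 1) / 2: solving 3n^2 - n - 2x = 0 gives
# n = (1 + sqrt(1 + 24x)) / 6, so x is pentagonal iff that n is a positive
# integer -- the `% 1 == 0` test above.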
| 355 | import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_snake_case = pytest.mark.integration
@require_faiss
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: Union[str, Any] ) -> str:
        __UpperCAmelCase : Optional[int] = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
__UpperCAmelCase : int = dset.map(
lambda __lowerCamelCase , __lowerCamelCase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__lowerCamelCase , keep_in_memory=__lowerCamelCase )
__UpperCAmelCase : Tuple = dset.add_faiss_index("vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT )
__UpperCAmelCase , __UpperCAmelCase : Dict = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def _lowerCamelCase ( self: List[str] ) -> int:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__UpperCAmelCase , __UpperCAmelCase : Tuple = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
__UpperCAmelCase , __UpperCAmelCase : List[Any] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(__lowerCamelCase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def _lowerCamelCase ( self: List[str] ) -> Dict:
from elasticsearch import Elasticsearch
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
__UpperCAmelCase : int = {"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 30 )
__UpperCAmelCase : Dict = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
__UpperCAmelCase : Any = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=__lowerCamelCase )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: List[str] ) -> Optional[int]:
import faiss
__UpperCAmelCase : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
__UpperCAmelCase : Dict = np.zeros(5 , dtype=np.floataa )
__UpperCAmelCase : List[str] = 1
__UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(__lowerCamelCase )
self.assertRaises(__lowerCamelCase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
__UpperCAmelCase : List[str] = np.eye(5 , dtype=np.floataa )[::-1]
__UpperCAmelCase , __UpperCAmelCase : Any = index.search_batch(__lowerCamelCase )
self.assertRaises(__lowerCamelCase , index.search_batch , queries[0] )
__UpperCAmelCase : Dict = [scores[0] for scores in total_scores]
__UpperCAmelCase : int = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , __lowerCamelCase )
def _lowerCamelCase ( self: Any ) -> List[str]:
import faiss
__UpperCAmelCase : Dict = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
__UpperCAmelCase : Optional[Any] = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__lowerCamelCase ):
__UpperCAmelCase : Any = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def _lowerCamelCase ( self: List[str] ) -> Dict:
import faiss
__UpperCAmelCase : str = faiss.IndexFlat(5 )
__UpperCAmelCase : int = FaissIndex(custom_index=__lowerCamelCase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def _lowerCamelCase ( self: Union[str, Any] ) -> int:
import faiss
__UpperCAmelCase : Any = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file:
index.save(tmp_file.name )
__UpperCAmelCase : List[str] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
__UpperCAmelCase : Tuple = np.zeros(5 , dtype=np.floataa )
__UpperCAmelCase : Tuple = 1
__UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search(__lowerCamelCase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def _UpperCamelCase ( snake_case__ ) -> Optional[Any]:
import faiss
__UpperCAmelCase : Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5, dtype=np.floataa ) )
__UpperCAmelCase : Optional[Any] = "index.faiss"
__UpperCAmelCase : Optional[int] = f'''mock://{index_name}'''
index.save(snake_case__, storage_options=mockfs.storage_options )
__UpperCAmelCase : Dict = FaissIndex.load(snake_case__, storage_options=mockfs.storage_options )
__UpperCAmelCase : str = np.zeros(5, dtype=np.floataa )
__UpperCAmelCase : Any = 1
__UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(snake_case__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
__UpperCAmelCase : Optional[Any] = Elasticsearch()
__UpperCAmelCase : Dict = {"acknowledged": True}
__UpperCAmelCase : Any = ElasticSearchIndex(es_client=__lowerCamelCase )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(["foo", "bar", "foobar"] )
# single query
__UpperCAmelCase : Dict = "foo"
__UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = index.search(__lowerCamelCase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
__UpperCAmelCase : int = "foo"
__UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search(__lowerCamelCase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
__UpperCAmelCase : int = ["foo", "bar", "foobar"]
__UpperCAmelCase : Union[str, Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
__UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search_batch(__lowerCamelCase )
__UpperCAmelCase : Tuple = [scores[0] for scores in total_scores]
__UpperCAmelCase : Optional[int] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __lowerCamelCase )
# batched queries with timeout
__UpperCAmelCase : str = ["foo", "bar", "foobar"]
__UpperCAmelCase : Tuple = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search_batch(__lowerCamelCase , request_timeout=30 )
__UpperCAmelCase : Union[str, Any] = [scores[0] for scores in total_scores]
__UpperCAmelCase : List[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __lowerCamelCase )
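# --- Added usage sketch (not part of the original tests; written with plain
# np.float32 for the dtype) ---
def _faiss_query_sketch():
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))
    scores, indices = index.search(np.ones(5, dtype=np.float32))
    return scores[0], indices[0]  # best inner product and the matching row id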
| 342 | 0 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _snake_case ( __UpperCamelCase ):
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: str ) -> int:
with open(__lowerCamelCase , encoding="utf-8" ) as input_file:
__UpperCAmelCase : int = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
__UpperCAmelCase : List[Any] = input_file.read()
__UpperCAmelCase : Optional[Any] = regexp.search(__lowerCamelCase )
return match
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: str ) -> Any:
with open(__lowerCamelCase , encoding="utf-8" ) as input_file:
__UpperCAmelCase : List[str] = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
__UpperCAmelCase : Optional[int] = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
__UpperCAmelCase : Tuple = regexp.finditer(__lowerCamelCase )
__UpperCAmelCase : str = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def _lowerCamelCase ( self: Tuple ) -> Any:
__UpperCAmelCase : int = Path("./datasets" )
__UpperCAmelCase : Optional[int] = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(__lowerCamelCase ) ):
raise AssertionError(f'''open(...) must use utf-8 encoding in {dataset}''' )
def _lowerCamelCase ( self: Dict ) -> Optional[int]:
__UpperCAmelCase : List[Any] = Path("./datasets" )
__UpperCAmelCase : Dict = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_print_statements(str(__lowerCamelCase ) ):
raise AssertionError(f'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
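# --- Added note (not part of the original) ---
# The first pattern flags `open(` calls that are not followed on the same line
# by an `encoding` kwarg or a binary/write mode token; the second consumes
# print(...) occurrences inside comments, strings and docstrings through the
# non-capturing alternatives, so only a bare print statement fills group 1.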
| 356 | import argparse
import struct
import unittest
class _snake_case :
def __init__( self: Tuple , __lowerCamelCase: bytes ) -> None:
__UpperCAmelCase : Tuple = data
# Initialize hash values
__UpperCAmelCase : Any = [
0x6_A_0_9_E_6_6_7,
0xB_B_6_7_A_E_8_5,
0x3_C_6_E_F_3_7_2,
0xA_5_4_F_F_5_3_A,
0x5_1_0_E_5_2_7_F,
0x9_B_0_5_6_8_8_C,
0x1_F_8_3_D_9_A_B,
0x5_B_E_0_C_D_1_9,
]
# Initialize round constants
__UpperCAmelCase : Dict = [
0x4_2_8_A_2_F_9_8,
0x7_1_3_7_4_4_9_1,
0xB_5_C_0_F_B_C_F,
0xE_9_B_5_D_B_A_5,
0x3_9_5_6_C_2_5_B,
0x5_9_F_1_1_1_F_1,
0x9_2_3_F_8_2_A_4,
0xA_B_1_C_5_E_D_5,
0xD_8_0_7_A_A_9_8,
0x1_2_8_3_5_B_0_1,
0x2_4_3_1_8_5_B_E,
0x5_5_0_C_7_D_C_3,
0x7_2_B_E_5_D_7_4,
0x8_0_D_E_B_1_F_E,
0x9_B_D_C_0_6_A_7,
0xC_1_9_B_F_1_7_4,
0xE_4_9_B_6_9_C_1,
0xE_F_B_E_4_7_8_6,
0x0_F_C_1_9_D_C_6,
0x2_4_0_C_A_1_C_C,
0x2_D_E_9_2_C_6_F,
0x4_A_7_4_8_4_A_A,
0x5_C_B_0_A_9_D_C,
0x7_6_F_9_8_8_D_A,
0x9_8_3_E_5_1_5_2,
0xA_8_3_1_C_6_6_D,
0xB_0_0_3_2_7_C_8,
0xB_F_5_9_7_F_C_7,
0xC_6_E_0_0_B_F_3,
0xD_5_A_7_9_1_4_7,
0x0_6_C_A_6_3_5_1,
0x1_4_2_9_2_9_6_7,
0x2_7_B_7_0_A_8_5,
0x2_E_1_B_2_1_3_8,
0x4_D_2_C_6_D_F_C,
0x5_3_3_8_0_D_1_3,
0x6_5_0_A_7_3_5_4,
0x7_6_6_A_0_A_B_B,
0x8_1_C_2_C_9_2_E,
0x9_2_7_2_2_C_8_5,
0xA_2_B_F_E_8_A_1,
0xA_8_1_A_6_6_4_B,
0xC_2_4_B_8_B_7_0,
0xC_7_6_C_5_1_A_3,
0xD_1_9_2_E_8_1_9,
0xD_6_9_9_0_6_2_4,
0xF_4_0_E_3_5_8_5,
0x1_0_6_A_A_0_7_0,
0x1_9_A_4_C_1_1_6,
0x1_E_3_7_6_C_0_8,
0x2_7_4_8_7_7_4_C,
0x3_4_B_0_B_C_B_5,
0x3_9_1_C_0_C_B_3,
0x4_E_D_8_A_A_4_A,
0x5_B_9_C_C_A_4_F,
0x6_8_2_E_6_F_F_3,
0x7_4_8_F_8_2_E_E,
0x7_8_A_5_6_3_6_F,
0x8_4_C_8_7_8_1_4,
0x8_C_C_7_0_2_0_8,
0x9_0_B_E_F_F_F_A,
0xA_4_5_0_6_C_E_B,
0xB_E_F_9_A_3_F_7,
0xC_6_7_1_7_8_F_2,
]
__UpperCAmelCase : List[Any] = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _lowerCamelCase ( __lowerCamelCase: bytes ) -> bytes:
__UpperCAmelCase : List[str] = B"\x80" + (B"\x00" * (63 - (len(__lowerCamelCase ) + 8) % 64))
__UpperCAmelCase : int = struct.pack(">Q" , (len(__lowerCamelCase ) * 8) )
return data + padding + big_endian_integer
def _lowerCamelCase ( self: Dict ) -> None:
# Convert into blocks of 64 bytes
__UpperCAmelCase : Dict = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
__UpperCAmelCase : List[str] = list(struct.unpack(">16L" , __lowerCamelCase ) )
# add 48 0-ed integers
words += [0] * 48
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
__UpperCAmelCase : Union[str, Any] = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
__UpperCAmelCase : str = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
__UpperCAmelCase : Union[str, Any] = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_0_0_0_0_0_0_0_0
# Compression
__UpperCAmelCase : Union[str, Any] = self.ror(__lowerCamelCase , 6 ) ^ self.ror(__lowerCamelCase , 11 ) ^ self.ror(__lowerCamelCase , 25 )
__UpperCAmelCase : Tuple = (e & f) ^ ((~e & 0xF_F_F_F_F_F_F_F) & g)
__UpperCAmelCase : int = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_0_0_0_0_0_0_0_0
__UpperCAmelCase : List[Any] = self.ror(__lowerCamelCase , 2 ) ^ self.ror(__lowerCamelCase , 13 ) ^ self.ror(__lowerCamelCase , 22 )
__UpperCAmelCase : Dict = (a & b) ^ (a & c) ^ (b & c)
__UpperCAmelCase : int = (sa + maj) % 0x1_0_0_0_0_0_0_0_0
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = (
g,
f,
e,
((d + tempa) % 0x1_0_0_0_0_0_0_0_0),
c,
b,
a,
((tempa + tempa) % 0x1_0_0_0_0_0_0_0_0),
)
__UpperCAmelCase : Optional[int] = [a, b, c, d, e, f, g, h]
# Modify final values
__UpperCAmelCase : List[str] = [
((element + mutated_hash_values[index]) % 0x1_0_0_0_0_0_0_0_0)
for index, element in enumerate(self.hashes )
]
__UpperCAmelCase : int = "".join([hex(__lowerCamelCase )[2:].zfill(8 ) for value in self.hashes] )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: int ) -> int:
return 0xF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations)
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: List[Any] ) -> None:
import hashlib
__UpperCAmelCase : Dict = bytes("Test String" , "utf-8" )
self.assertEqual(SHAaaa(__lowerCamelCase ).hash , hashlib.shaaaa(__lowerCamelCase ).hexdigest() )
def _UpperCamelCase ( ) -> None:
import doctest
doctest.testmod()
__UpperCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
parser.add_argument(
"-f", "--file", dest="input_file", help="Hash contents of a file" )
__UpperCAmelCase : List[Any] = parser.parse_args()
__UpperCAmelCase : Optional[int] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file, "rb" ) as f:
__UpperCAmelCase : List[str] = f.read()
else:
__UpperCAmelCase : List[Any] = bytes(snake_case__, "utf-8" )
print(SHAaaa(snake_case__ ).hash )
if __name__ == "__main__":
main()
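# --- Added check sketch (not part of the original; restates the unit test
# above as a standalone helper) ---
def _sha256_matches_hashlib(message: bytes = b"abc") -> bool:
    import hashlib

    return SHAaaa(message).hash == hashlib.sha256(message).hexdigest()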
| 342 | 0 |
_snake_case = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def _UpperCamelCase ( A__ ) -> Tuple:
assert type(A__ ) in (int, float) and decimal == int(A__ )
__UpperCAmelCase : Optional[Any] = int(A__ )
__UpperCAmelCase : Dict = ""
__UpperCAmelCase : int = False
if decimal < 0:
__UpperCAmelCase : Tuple = True
decimal *= -1
while decimal > 0:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = divmod(A__, 16 )
__UpperCAmelCase : List[str] = values[remainder] + hexadecimal
__UpperCAmelCase : Any = "0x" + hexadecimal
if negative:
__UpperCAmelCase : Tuple = "-" + hexadecimal
return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
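# --- Added examples (not part of the original; the converter above is shown
# under a generated name) ---
# For nonzero integers the output matches Python's built-in hex():
#   5    -> "0x5"
#   -256 -> "-0x100"
# (0 is the one edge case: the loop never runs, yielding "0x" instead of "0x0".)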
| 357 | import numpy as np
import datasets
_snake_case = '''
Compute the Mahalanobis Distance
The Mahalanobis distance is the distance between a point and a distribution,
not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
_snake_case = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
_snake_case = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalanobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def _lowerCamelCase ( self: List[str] ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: Union[str, Any] ) -> List[str]:
# convert to numpy arrays
__UpperCAmelCase : int = np.array(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = np.array(__lowerCamelCase )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("Expected `X` to be a 2D vector" )
if len(reference_distribution.shape ) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
# Get mahalanobis distance for each prediction
__UpperCAmelCase : str = X - np.mean(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = np.cov(reference_distribution.T )
try:
__UpperCAmelCase : int = np.linalg.inv(__lowerCamelCase )
except np.linalg.LinAlgError:
__UpperCAmelCase : Optional[int] = np.linalg.pinv(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = np.dot(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Optional[int] = np.dot(__lowerCamelCase , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 342 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: Dict ) -> Optional[int]:
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
__UpperCAmelCase : Dict = [[1, 2, 4], [1, 2, 3, 4]]
__UpperCAmelCase : List[str] = DisjunctiveConstraint(__lowerCAmelCase )
self.assertTrue(isinstance(dc.token_ids , __lowerCAmelCase ) )
with self.assertRaises(__lowerCAmelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__lowerCAmelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def _lowerCamelCase ( self: Union[str, Any] ) -> str:
        # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
__UpperCAmelCase : List[str] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__lowerCAmelCase ):
DisjunctiveConstraint(__lowerCAmelCase ) # fails here
def _lowerCamelCase ( self: List[Any] ) -> List[str]:
__UpperCAmelCase : str = [[1, 2, 3], [1, 2, 4]]
__UpperCAmelCase : str = DisjunctiveConstraint(__lowerCAmelCase )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = dc.update(1 )
__UpperCAmelCase : str = stepped is True and completed is False and reset is False
self.assertTrue(__lowerCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[str] = dc.update(2 )
__UpperCAmelCase : List[Any] = stepped is True and completed is False and reset is False
self.assertTrue(__lowerCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = dc.update(3 )
__UpperCAmelCase : Any = stepped is True and completed is True and reset is False
self.assertTrue(__lowerCAmelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def _lowerCamelCase ( self: List[str] ) -> List[str]:
__UpperCAmelCase : List[str] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__UpperCAmelCase : Any = DisjunctiveConstraint(__lowerCAmelCase )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
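# --- Added usage sketch (not part of the original tests) ---
def _disjunctive_progress_sketch() -> bool:
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for token in (1, 2, 4):
        stepped, completed, reset = dc.update(token)
    return completed  # True: the [1, 2, 4] branch was fulfilled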
| 358 | import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class _snake_case ( unittest.TestCase ):
def __init__( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict=13 , __lowerCamelCase: List[str]=7 , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: int=True , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Tuple=99 , __lowerCamelCase: List[str]=32 , __lowerCamelCase: Optional[Any]=5 , __lowerCamelCase: List[str]=4 , __lowerCamelCase: str=37 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: int=0.1 , __lowerCamelCase: Optional[Any]=0.1 , __lowerCamelCase: Tuple=5_12 , __lowerCamelCase: int=16 , __lowerCamelCase: str=2 , __lowerCamelCase: Optional[Any]=0.02 , __lowerCamelCase: Optional[Any]=4 , ) -> str:
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : Optional[int] = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[str] = use_attention_mask
__UpperCAmelCase : Dict = use_token_type_ids
__UpperCAmelCase : Optional[int] = use_labels
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : Dict = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : Tuple = intermediate_size
__UpperCAmelCase : Union[str, Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : str = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = max_position_embeddings
__UpperCAmelCase : Optional[int] = type_vocab_size
__UpperCAmelCase : str = type_sequence_label_size
__UpperCAmelCase : Tuple = initializer_range
__UpperCAmelCase : str = num_choices
def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
__UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : str = None
if self.use_attention_mask:
__UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : Any = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__lowerCamelCase , )
return config, input_ids, attention_mask
def _lowerCamelCase ( self: str ) -> Any:
__UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = config_and_inputs
__UpperCAmelCase : Any = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: str = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCamelCase ( self: List[Any] ) -> Dict:
__UpperCAmelCase : Union[str, Any] = FlaxDistilBertModelTester(self )
@slow
def _lowerCamelCase ( self: Tuple ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
__UpperCAmelCase : Optional[int] = model_class_name.from_pretrained("distilbert-base-uncased" )
__UpperCAmelCase : Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(__lowerCamelCase )
@require_flax
class _snake_case ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self: int ) -> List[Any]:
__UpperCAmelCase : Dict = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased" )
__UpperCAmelCase : Any = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__UpperCAmelCase : Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__UpperCAmelCase : int = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
__UpperCAmelCase : str = (1, 11, 7_68)
self.assertEqual(output.shape , __lowerCamelCase )
__UpperCAmelCase : Optional[int] = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1e-4 ) )
| 342 | 0 |
from __future__ import annotations
def _UpperCamelCase ( snake_case__ ) -> None:
create_state_space_tree(_UpperCamelCase, [], 0, [0 for i in range(len(_UpperCamelCase ) )] )
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__, ) -> None:
if index == len(_UpperCamelCase ):
print(_UpperCamelCase )
return
for i in range(len(_UpperCamelCase ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
__UpperCAmelCase : List[str] = True
create_state_space_tree(_UpperCamelCase, _UpperCamelCase, index + 1, _UpperCamelCase )
current_sequence.pop()
__UpperCAmelCase : Any = False
_snake_case = [3, 1, 2, 4]
generate_all_permutations(sequence)
_snake_case = ["A", "B", "C"]
generate_all_permutations(sequence_a)
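# --- Added note (not part of the original) ---
# The backtracking enumerates every ordering exactly once, so the first call
# prints 4! = 24 permutations of [3, 1, 2, 4] and the second prints 3! = 6
# permutations of ["A", "B", "C"].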
| 359 | import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
_snake_case = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
_snake_case = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
_snake_case = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
_snake_case = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
_snake_case = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Any:
for tf_name, hf_name in patterns:
__UpperCAmelCase : Optional[int] = k.replace(snake_case__, snake_case__ )
return k
def _UpperCamelCase ( snake_case__, snake_case__ ) -> BigBirdPegasusForConditionalGeneration:
__UpperCAmelCase : Dict = BigBirdPegasusConfig(**snake_case__ )
__UpperCAmelCase : Dict = BigBirdPegasusForConditionalGeneration(snake_case__ )
__UpperCAmelCase : Optional[Any] = torch_model.state_dict()
__UpperCAmelCase : Optional[int] = {}
# separating decoder weights
__UpperCAmelCase : List[Any] = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder" )}
__UpperCAmelCase : str = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder" )}
for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion" ):
__UpperCAmelCase : Optional[int] = [k.endswith(snake_case__ ) for ending in KEYS_TO_IGNORE]
if any(snake_case__ ):
continue
__UpperCAmelCase : List[str] = DECODER_PATTERNS
__UpperCAmelCase : str = rename_state_dict_key(snake_case__, snake_case__ )
if new_k not in state_dict:
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
if any(True if i in k else False for i in ["dense", "query", "key", "value"] ):
__UpperCAmelCase : Optional[int] = v.T
__UpperCAmelCase : str = torch.from_numpy(snake_case__ )
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion" ):
__UpperCAmelCase : int = [k.endswith(snake_case__ ) for ending in KEYS_TO_IGNORE]
if any(snake_case__ ):
continue
__UpperCAmelCase : Optional[Any] = REMAINING_PATTERNS
__UpperCAmelCase : Optional[int] = rename_state_dict_key(snake_case__, snake_case__ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
if any(True if i in k else False for i in ["dense", "query", "key", "value"] ):
__UpperCAmelCase : List[Any] = v.T
__UpperCAmelCase : List[str] = torch.from_numpy(snake_case__ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
__UpperCAmelCase : List[Any] = mapping["model.embed_positions.weight"]
__UpperCAmelCase : Optional[Any] = mapping.pop("model.embed_positions.weight" )
__UpperCAmelCase , __UpperCAmelCase : Any = torch_model.load_state_dict(snake_case__, strict=snake_case__ )
__UpperCAmelCase : str = [
k
for k in missing
if k
not in [
"final_logits_bias",
"model.encoder.embed_tokens.weight",
"model.decoder.embed_tokens.weight",
"lm_head.weight",
]
]
assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], f'''no matches found for the following tf keys {extra}'''
return torch_model
def _UpperCamelCase ( snake_case__ ) -> Dict:
__UpperCAmelCase : Tuple = tf.train.list_variables(snake_case__ )
__UpperCAmelCase : List[str] = {}
__UpperCAmelCase : str = ["global_step"]
for name, shape in tqdm(snake_case__, desc="converting tf checkpoint to dict" ):
__UpperCAmelCase : Tuple = any(pat in name for pat in ignore_name )
if skip_key:
continue
__UpperCAmelCase : Optional[Any] = tf.train.load_variable(snake_case__, snake_case__ )
__UpperCAmelCase : Tuple = array
return tf_weights
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Dict:
__UpperCAmelCase : str = get_tf_weights_as_numpy(snake_case__ )
__UpperCAmelCase : List[Any] = convert_bigbird_pegasus(snake_case__, snake_case__ )
torch_model.save_pretrained(snake_case__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
    parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
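# A hedged usage sketch for the full script above (the script file name and the
# two paths are hypothetical placeholders, not taken from the source):
#
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path /path/to/bigbird_pegasus_tf_ckpt \
#       --save_dir ./bigbird-pegasus-pytorch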
| 342 | 0 |
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: """Sunday""",
    1: """Monday""",
    2: """Tuesday""",
    3: """Wednesday""",
    4: """Thursday""",
    5: """Friday""",
    6: """Saturday""",
}
def _UpperCamelCase ( year, month, day ) -> str:
    assert len(str(year ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 and 12"
    assert 1 <= day <= 31, "day should be between 1 and 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
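# A worked example for the Doomsday function above (it keeps its placeholder
# name); 2020-10-24 really was a Saturday:
#
#   >>> _UpperCamelCase(2020, 10, 24)
#   'Saturday'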
| 360 | import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class _snake_case ( _lowercase ):
lowerCamelCase__: Any = ["image_processor", "tokenizer"]
lowerCamelCase__: Optional[Any] = "BlipImageProcessor"
lowerCamelCase__: Optional[int] = "AutoTokenizer"
def __init__( self: List[str] , __lowerCamelCase: str , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[Any] ) -> Dict:
super().__init__(__lowerCamelCase , __lowerCamelCase )
# add QFormer tokenizer
__UpperCAmelCase : Dict = qformer_tokenizer
def __call__( self: Any , __lowerCamelCase: ImageInput = None , __lowerCamelCase: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCamelCase: bool = True , __lowerCamelCase: Union[bool, str, PaddingStrategy] = False , __lowerCamelCase: Union[bool, str, TruncationStrategy] = None , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: int = 0 , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = True , __lowerCamelCase: Optional[Union[str, TensorType]] = None , **__lowerCamelCase: Dict , ) -> BatchFeature:
if images is None and text is None:
raise ValueError("You have to specify at least images or text." )
__UpperCAmelCase : str = BatchFeature()
if text is not None:
__UpperCAmelCase : Any = self.tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
encoding.update(__lowerCamelCase )
__UpperCAmelCase : Dict = self.qformer_tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
__UpperCAmelCase : int = qformer_text_encoding.pop("input_ids" )
__UpperCAmelCase : Optional[int] = qformer_text_encoding.pop("attention_mask" )
if images is not None:
__UpperCAmelCase : Union[str, Any] = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase )
encoding.update(__lowerCamelCase )
return encoding
def _lowerCamelCase ( self: Any , *__lowerCamelCase: Any , **__lowerCamelCase: Any ) -> Optional[Any]:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( self: Tuple , *__lowerCamelCase: Any , **__lowerCamelCase: Dict ) -> Tuple:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _lowerCamelCase ( self: List[str] ) -> Tuple:
__UpperCAmelCase : str = self.tokenizer.model_input_names
__UpperCAmelCase : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Union[str, Any] , **__lowerCamelCase: Optional[Any] ) -> str:
if os.path.isfile(__lowerCamelCase ):
raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
__UpperCAmelCase : List[str] = os.path.join(__lowerCamelCase , "qformer_tokenizer" )
self.qformer_tokenizer.save_pretrained(__lowerCamelCase )
return super().save_pretrained(__lowerCamelCase , **__lowerCamelCase )
@classmethod
def _lowerCamelCase ( cls: Tuple , __lowerCamelCase: Tuple , **__lowerCamelCase: Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(__lowerCamelCase , subfolder="qformer_tokenizer" )
__UpperCAmelCase : List[Any] = cls._get_arguments_from_pretrained(__lowerCamelCase , **__lowerCamelCase )
args.append(__lowerCamelCase )
return cls(*__lowerCamelCase )
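# A hedged usage sketch: the class above mirrors the upstream InstructBlipProcessor,
# which pairs a BLIP image processor with two tokenizers (one for the language model,
# one for the Q-Former). The checkpoint id below is an assumption for illustration:
#
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")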
| 342 | 0 |
Vectorad = tuple[float, float, float]
Pointad = tuple[float, float, float]
def create_vector ( end_pointa, end_pointb ) -> Vectorad:
    x = end_pointb[0] - end_pointa[0]
    y = end_pointb[1] - end_pointa[1]
    z = end_pointb[2] - end_pointa[2]
    return (x, y, z)
def get_ad_vectors_cross ( ab, ac ) -> Vectorad:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def is_zero_vector ( vector, accuracy ) -> bool:
    return tuple(round(x, accuracy ) for x in vector ) == (0, 0, 0)
def _UpperCamelCase ( a, b, c, accuracy=10 ) -> bool:
    ab = create_vector(a, b )
    ac = create_vector(a, c )
    return is_zero_vector(get_ad_vectors_cross(ab, ac ), accuracy )
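# A quick check of the collinearity test above (names follow the restored
# definitions; the points are illustrative):
#
#   >>> _UpperCamelCase((0.0, 0.0, 0.0), (1.0, 1.0, 1.0), (2.0, 2.0, 2.0))
#   True
#   >>> _UpperCamelCase((0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0))
#   False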
| 361 | import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
_snake_case = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
_snake_case = {'''facebook/blenderbot-3B''': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode ( ):
    bs = (
        list(range(ord("!" ), ord("~" ) + 1 ) ) + list(range(ord("¡" ), ord("¬" ) + 1 ) ) + list(range(ord("®" ), ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs, cs ) )
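# For instance, the raw space byte (32) falls outside the printable ranges above,
# so it is remapped to a substitute character:
#
#   >>> bytes_to_unicode()[ord(" ")]
#   'Ġ'
#
# which is why GPT-2/Blenderbot-style vocabularies render word-initial tokens
# with a leading 'Ġ'.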
def get_pairs ( word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
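# Example: the symbol pairs that BPE will consider merging for one word:
#
#   >>> sorted(get_pairs("hello"))
#   [('e', 'l'), ('h', 'e'), ('l', 'l'), ('l', 'o')]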
class _snake_case ( _lowercase ):
lowerCamelCase__: str = VOCAB_FILES_NAMES
lowerCamelCase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__: Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__: Dict = ["input_ids", "attention_mask"]
def __init__( self: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]="replace" , __lowerCamelCase: List[str]="<s>" , __lowerCamelCase: List[str]="</s>" , __lowerCamelCase: str="</s>" , __lowerCamelCase: Tuple="<s>" , __lowerCamelCase: Optional[int]="<unk>" , __lowerCamelCase: Any="<pad>" , __lowerCamelCase: List[str]="<mask>" , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int , ) -> List[str]:
__UpperCAmelCase : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
__UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
__UpperCAmelCase : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
__UpperCAmelCase : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
__UpperCAmelCase : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
__UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase : Dict = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , )
with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
__UpperCAmelCase : List[Any] = json.load(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = {v: k for k, v in self.encoder.items()}
__UpperCAmelCase : Dict = errors # how to handle errors in decoding
__UpperCAmelCase : Optional[int] = bytes_to_unicode()
__UpperCAmelCase : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle:
__UpperCAmelCase : List[Any] = merges_handle.read().split("\n" )[1:-1]
__UpperCAmelCase : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
__UpperCAmelCase : int = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
__UpperCAmelCase : List[Any] = {}
__UpperCAmelCase : Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__UpperCAmelCase : int = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def _lowerCamelCase ( self: Dict ) -> Any:
return len(self.encoder )
def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
return dict(self.encoder , **self.added_tokens_encoder )
def _lowerCamelCase ( self: int , __lowerCamelCase: List[Any] ) -> Union[str, Any]:
if token in self.cache:
return self.cache[token]
__UpperCAmelCase : List[Any] = tuple(__lowerCamelCase )
__UpperCAmelCase : Dict = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
__UpperCAmelCase : Optional[int] = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = bigram
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : str = 0
while i < len(__lowerCamelCase ):
try:
__UpperCAmelCase : Union[str, Any] = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCAmelCase : Union[str, Any] = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCAmelCase : List[Any] = tuple(__lowerCamelCase )
__UpperCAmelCase : str = new_word
if len(__lowerCamelCase ) == 1:
break
else:
__UpperCAmelCase : Optional[Any] = get_pairs(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = " ".join(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = word
return word
def _lowerCamelCase ( self: Dict , __lowerCamelCase: Optional[Any] ) -> Dict:
__UpperCAmelCase : Any = []
for token in re.findall(self.pat , __lowerCamelCase ):
__UpperCAmelCase : int = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) )
return bpe_tokens
def _lowerCamelCase ( self: int , __lowerCamelCase: str ) -> Dict:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[Any] ) -> List[str]:
return self.decoder.get(__lowerCamelCase )
def _lowerCamelCase ( self: Any , __lowerCamelCase: Any ) -> int:
__UpperCAmelCase : Dict = "".join(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCAmelCase : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__UpperCAmelCase : Dict = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" )
__UpperCAmelCase : Optional[Any] = 0
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
__UpperCAmelCase : Optional[Any] = token_index
writer.write(" ".join(__lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def _lowerCamelCase ( self: Dict , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None , __lowerCamelCase: bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase : int = [self.sep_token_id]
__UpperCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCamelCase ( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int ) -> List[Any]:
__UpperCAmelCase : Optional[Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()):
__UpperCAmelCase : Optional[Any] = " " + text
return (text, kwargs)
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[str]:
return token_ids_a + [self.eos_token_id]
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: "Conversation" ) -> List[int]:
__UpperCAmelCase : Tuple = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to prefix a space, as is done inside Blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = " ".join(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.encode(__lowerCamelCase )
if len(__lowerCamelCase ) > self.model_max_length:
__UpperCAmelCase : List[Any] = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
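# A hedged round-trip sketch (mirrors the upstream BlenderbotTokenizer API; the
# checkpoint id is the one hard-coded in the vocab maps above):
#
#   tok = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
#   ids = tok(" Hello world")["input_ids"]   # Blenderbot expects a leading space
#   tok.decode(ids)                          # -> " Hello world</s>"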
| 342 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''DistilBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
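# With the _LazyModule indirection above, the torch/tf/flax submodules are only
# imported when one of their attributes is first touched. A sketch of the effect
# (assuming transformers and torch are installed):
#
#   import transformers.models.distilbert as distilbert
#   distilbert.DistilBertModel   # modeling_distilbert (and torch) load only now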
| 362 | import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: List[Any] = CanineTokenizer
lowerCamelCase__: Optional[int] = False
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
super().setUp()
__UpperCAmelCase : Tuple = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowerCamelCase ( self: Union[str, Any] ) -> List[Any]:
return CanineTokenizer.from_pretrained("google/canine-s" )
def _lowerCamelCase ( self: Any , **__lowerCamelCase: List[Any] ) -> CanineTokenizer:
__UpperCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
__UpperCAmelCase : Optional[int] = 10_24
return tokenizer
@require_torch
def _lowerCamelCase ( self: List[str] ) -> int:
__UpperCAmelCase : Union[str, Any] = self.canine_tokenizer
__UpperCAmelCase : List[str] = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
__UpperCAmelCase : Dict = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
# fmt: on
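        # Note: CANINE tokenizes straight to Unicode code points, so 76 == ord("L"),
        # 1_05 == ord("i"), and so on; 5_73_44/5_73_45 are 0xE000/0xE001, the
        # private-use code points reserved for [CLS] and [SEP], and the trailing
        # zeros are padding.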
__UpperCAmelCase : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Optional[Any] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
__UpperCAmelCase : Optional[Any] = self.canine_tokenizer
__UpperCAmelCase : Dict = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
__UpperCAmelCase : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , __lowerCamelCase )
self.assertIn("attention_mask" , __lowerCamelCase )
self.assertIn("token_type_ids" , __lowerCamelCase )
@require_torch
def _lowerCamelCase ( self: Any ) -> List[str]:
__UpperCAmelCase : Optional[Any] = self.canine_tokenizer
__UpperCAmelCase : int = [
"What's the weater?",
"It's about 25 degrees.",
]
__UpperCAmelCase : List[Any] = tokenizer(
text_target=__lowerCamelCase , max_length=32 , padding="max_length" , truncation=__lowerCamelCase , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
# safety check on max_len default value so we are sure the test works
__UpperCAmelCase : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__UpperCAmelCase : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCAmelCase : int = tempfile.mkdtemp()
__UpperCAmelCase : List[Any] = " He is very happy, UNwant\u00E9d,running"
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
__UpperCAmelCase : Tuple = tokenizer.__class__.from_pretrained(__lowerCamelCase )
__UpperCAmelCase : Dict = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
shutil.rmtree(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCAmelCase : List[Any] = tempfile.mkdtemp()
__UpperCAmelCase : Optional[int] = " He is very happy, UNwant\u00E9d,running"
__UpperCAmelCase : str = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
__UpperCAmelCase : Tuple = chr(0xE_0_0_7 )
additional_special_tokens.append(__lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
__UpperCAmelCase : Optional[int] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
__UpperCAmelCase : str = tokenizer.__class__.from_pretrained(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertIn(__lowerCamelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__UpperCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(__lowerCamelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__lowerCamelCase )
def _lowerCamelCase ( self: str ) -> Optional[int]:
__UpperCAmelCase : List[Any] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.get_clean_sequence(__lowerCamelCase )
# a special token for Canine can be defined as follows:
__UpperCAmelCase : int = 0xE_0_0_5
__UpperCAmelCase : Tuple = chr(__lowerCamelCase )
tokenizer.add_special_tokens({"cls_token": special_token} )
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , 1 )
__UpperCAmelCase : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : Dict = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : int = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , input_encoded + special_token_id )
__UpperCAmelCase : Optional[int] = tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
self.assertTrue(special_token not in decoded )
def _lowerCamelCase ( self: Optional[int] ) -> Optional[Any]:
__UpperCAmelCase : List[str] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : Optional[int] = chr(0xE_0_0_5 )
__UpperCAmelCase : List[str] = chr(0xE_0_0_6 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__lowerCamelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
__UpperCAmelCase : Tuple = tokenizer.tokenize(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , 1 )
self.assertEqual(len(__lowerCamelCase ) , 1 )
self.assertEqual(token_a[0] , __lowerCamelCase )
self.assertEqual(token_a[0] , __lowerCamelCase )
@require_tokenizers
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
__UpperCAmelCase : Any = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
__UpperCAmelCase : Union[str, Any] = 0xE_0_0_6
__UpperCAmelCase : int = chr(__lowerCamelCase )
__UpperCAmelCase : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__lowerCamelCase )
tokenizer.from_pretrained(__lowerCamelCase )
def _lowerCamelCase ( self: Dict ) -> List[str]:
__UpperCAmelCase : str = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
__UpperCAmelCase : Tuple = json.load(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
__UpperCAmelCase : Optional[int] = json.load(__lowerCamelCase )
# a special token for Canine can be defined as follows:
__UpperCAmelCase : Any = 0xE_0_0_6
__UpperCAmelCase : Union[str, Any] = chr(__lowerCamelCase )
__UpperCAmelCase : Dict = [new_token_a]
__UpperCAmelCase : int = [new_token_a]
with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__UpperCAmelCase : List[str] = tokenizer_class.from_pretrained(__lowerCamelCase , extra_ids=0 )
self.assertIn(__lowerCamelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
__UpperCAmelCase : List[Any] = 0xE_0_0_7
__UpperCAmelCase : List[Any] = chr(__lowerCamelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__UpperCAmelCase : str = [AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase )]
__UpperCAmelCase : Dict = tokenizer_class.from_pretrained(
__lowerCamelCase , additional_special_tokens=__lowerCamelCase , extra_ids=0 )
self.assertIn(__lowerCamelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : Optional[int] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : int = "hello world"
if self.space_between_special_tokens:
__UpperCAmelCase : Any = "[CLS] hello world [SEP]"
else:
__UpperCAmelCase : Union[str, Any] = input
__UpperCAmelCase : List[Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : Any = tokenizer.decode(__lowerCamelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__lowerCamelCase , [output, output.lower()] )
def _lowerCamelCase ( self: Dict ) -> Any:
__UpperCAmelCase : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : List[str] = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
__UpperCAmelCase : List[str] = "a"
__UpperCAmelCase : Any = ord(__lowerCamelCase )
for attr in attributes_list:
setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase )
setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase )
setattr(__lowerCamelCase , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [] )
__UpperCAmelCase : Tuple = 0xE_0_0_6
__UpperCAmelCase : Optional[Any] = chr(__lowerCamelCase )
setattr(__lowerCamelCase , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
pass
def _lowerCamelCase ( self: Any ) -> Any:
pass
def _lowerCamelCase ( self: Union[str, Any] ) -> Tuple:
pass
def _lowerCamelCase ( self: Optional[int] ) -> Any:
pass
def _lowerCamelCase ( self: List[str] ) -> str:
pass
def _lowerCamelCase ( self: Union[str, Any] ) -> Optional[int]:
pass
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
pass
def _lowerCamelCase ( self: str ) -> Tuple:
pass
| 342 | 0 |
"""simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env ( key, default=False ):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'''If set, {key} must be yes or no.''' )
    return _value
_run_slow_tests = parse_flag_from_env('''RUN_SLOW''', default=False)
def _UpperCamelCase ( snake_case__ ) -> List[Any]:
return unittest.skip("Test was skipped" )(UpperCamelCase__ )
def _UpperCamelCase ( snake_case__ ) -> List[str]:
return unittest.skipUnless(_run_slow_tests, "test is slow" )(UpperCamelCase__ )
def _UpperCamelCase ( snake_case__ ) -> Tuple:
return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU" )(UpperCamelCase__ )
def _UpperCamelCase ( snake_case__ ) -> List[Any]:
return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU" )(UpperCamelCase__ )
def _UpperCamelCase ( snake_case__ ) -> Optional[int]:
return unittest.skipUnless(is_xpu_available(), "test requires a XPU" )(UpperCamelCase__ )
def _UpperCamelCase ( snake_case__ ) -> Union[str, Any]:
return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`" )(UpperCamelCase__ )
def _UpperCamelCase ( snake_case__ ) -> str:
return unittest.skipUnless(
is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite" )(UpperCamelCase__ )
def _UpperCamelCase ( snake_case__ ) -> Dict:
return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library" )(UpperCamelCase__ )
def _UpperCamelCase ( snake_case__ ) -> int:
return unittest.skipUnless(is_tpu_available(), "test requires TPU" )(UpperCamelCase__ )
def _UpperCamelCase ( snake_case__ ) -> int:
return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU" )(UpperCamelCase__ )
def _UpperCamelCase ( snake_case__ ) -> Optional[int]:
return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU" )(UpperCamelCase__ )
def _UpperCamelCase ( snake_case__ ) -> Any:
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs" )(UpperCamelCase__ )
def _UpperCamelCase ( snake_case__ ) -> Optional[int]:
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs" )(UpperCamelCase__ )
def _UpperCamelCase ( snake_case__ ) -> Optional[int]:
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors" )(UpperCamelCase__ )
def _UpperCamelCase ( snake_case__ ) -> str:
return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed" )(UpperCamelCase__ )
def _UpperCamelCase ( snake_case__ ) -> Tuple:
return unittest.skipUnless(is_torch_version(">=", "1.12.0" ), "test requires torch version >= 1.12.0" )(UpperCamelCase__ )
def _UpperCamelCase ( snake_case__=None, snake_case__=None ) -> int:
if test_case is None:
return partial(UpperCamelCase__, version=UpperCamelCase__ )
return unittest.skipUnless(is_torch_version(">=", UpperCamelCase__ ), f'''test requires torch version >= {version}''' )(UpperCamelCase__ )
def _UpperCamelCase ( snake_case__ ) -> str:
return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard" )(UpperCamelCase__ )
def _UpperCamelCase ( snake_case__ ) -> Any:
return unittest.skipUnless(is_wandb_available(), "test requires wandb" )(UpperCamelCase__ )
def _UpperCamelCase ( snake_case__ ) -> Optional[int]:
return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml" )(UpperCamelCase__ )
_snake_case = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def _UpperCamelCase ( snake_case__ ) -> List[Any]:
return unittest.skipUnless(
_atleast_one_tracker_available, "test requires at least one tracker to be available and for `comet_ml` to not be installed", )(UpperCamelCase__ )
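# A hedged usage sketch for the skip decorators defined above (upstream they are
# exported under names such as `slow` and `require_cuda`, which this dump has
# renamed away):
#
#   @require_cuda
#   def test_big_model(self):
#       ...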
class _snake_case ( unittest.TestCase ):
lowerCamelCase__: Optional[int] = True
@classmethod
def _lowerCamelCase ( cls: Union[str, Any] ) -> int:
__UpperCAmelCase : List[Any] = tempfile.mkdtemp()
@classmethod
def _lowerCamelCase ( cls: Tuple ) -> Tuple:
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def _lowerCamelCase ( self: Optional[int] ) -> Any:
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("**/*" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(_a )
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: List[Any] ) -> str:
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: Any , __lowerCamelCase: Union[mock.Mock, List[mock.Mock]] ) -> Any:
__UpperCAmelCase : Dict = mocks if isinstance(_a , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def _UpperCamelCase ( snake_case__ ) -> Optional[int]:
__UpperCAmelCase : Any = AcceleratorState()
__UpperCAmelCase : List[str] = tensor[None].clone().to(state.device )
__UpperCAmelCase : Union[str, Any] = gather(UpperCamelCase__ ).cpu()
__UpperCAmelCase : Dict = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i], UpperCamelCase__ ):
return False
return True
class _snake_case :
def __init__( self: Dict , __lowerCamelCase: Tuple , __lowerCamelCase: Any , __lowerCamelCase: Optional[Any] ) -> int:
__UpperCAmelCase : List[Any] = returncode
__UpperCAmelCase : str = stdout
__UpperCAmelCase : Any = stderr
async def _UpperCamelCase ( snake_case__, snake_case__ ) -> int:
while True:
__UpperCAmelCase : str = await stream.readline()
if line:
callback(UpperCamelCase__ )
else:
break
async def _UpperCamelCase ( snake_case__, snake_case__=None, snake_case__=None, snake_case__=None, snake_case__=False, snake_case__=False ) -> str:
if echo:
print("\nRunning: ", " ".join(UpperCamelCase__ ) )
__UpperCAmelCase : List[Any] = await asyncio.create_subprocess_exec(
cmd[0], *cmd[1:], stdin=UpperCamelCase__, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=UpperCamelCase__, )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__UpperCAmelCase : Optional[Any] = []
__UpperCAmelCase : List[Any] = []
    def tee(line, sink, pipe, label="" ):
        line = line.decode("utf-8" ).rstrip()
        sink.append(line )
        if not quiet:
            print(label, line, file=pipe )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout, lambda snake_case__ : tee(UpperCamelCase__, UpperCamelCase__, sys.stdout, label="stdout:" ) ) ),
asyncio.create_task(_read_stream(p.stderr, lambda snake_case__ : tee(UpperCamelCase__, UpperCamelCase__, sys.stderr, label="stderr:" ) ) ),
], timeout=UpperCamelCase__, )
return _RunOutput(await p.wait(), UpperCamelCase__, UpperCamelCase__ )
def _UpperCamelCase ( snake_case__, snake_case__=None, snake_case__=None, snake_case__=180, snake_case__=False, snake_case__=True ) -> Optional[Any]:
__UpperCAmelCase : List[str] = asyncio.get_event_loop()
__UpperCAmelCase : Any = loop.run_until_complete(
_stream_subprocess(UpperCamelCase__, env=UpperCamelCase__, stdin=UpperCamelCase__, timeout=UpperCamelCase__, quiet=UpperCamelCase__, echo=UpperCamelCase__ ) )
__UpperCAmelCase : Union[str, Any] = " ".join(UpperCamelCase__ )
if result.returncode > 0:
__UpperCAmelCase : Tuple = "\n".join(result.stderr )
raise RuntimeError(
f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
f'''The combined stderr from workers follows:\n{stderr}''' )
return result
class _snake_case ( _lowercase ):
pass
def _UpperCamelCase ( snake_case__, snake_case__=False ) -> Any:
try:
__UpperCAmelCase : int = subprocess.check_output(UpperCamelCase__, stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(UpperCamelCase__, "decode" ):
__UpperCAmelCase : List[Any] = output.decode("utf-8" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'''Command `{' '.join(UpperCamelCase__ )}` failed with the following error:\n\n{e.output.decode()}''' ) from e
| 363 | import logging
import os
from .state import PartialState
class _snake_case ( logging.LoggerAdapter ):
@staticmethod
def _lowerCamelCase ( __lowerCamelCase: Any ) -> int:
__UpperCAmelCase : str = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[Any] , *__lowerCamelCase: List[str] , **__lowerCamelCase: List[Any] ) -> Optional[int]:
if PartialState._shared_state == {}:
raise RuntimeError(
"You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." )
__UpperCAmelCase : Any = kwargs.pop("main_process_only" , __lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = kwargs.pop("in_order" , __lowerCamelCase )
if self.isEnabledFor(__lowerCamelCase ):
if self._should_log(__lowerCamelCase ):
__UpperCAmelCase , __UpperCAmelCase : int = self.process(__lowerCamelCase , __lowerCamelCase )
self.logger.log(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
elif in_order:
__UpperCAmelCase : Optional[int] = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self.process(__lowerCamelCase , __lowerCamelCase )
self.logger.log(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
state.wait_for_everyone()
def get_logger ( name, log_level = None ):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None )
    logger = logging.getLogger(name )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(logger, {} )
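# A hedged usage sketch (this mirrors accelerate.logging.get_logger):
#
#   logger = get_logger(__name__)
#   logger.info("Logged once, from the main process only", main_process_only=True)
#   logger.debug("Logged by every process, rank by rank", main_process_only=False, in_order=True)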
| 342 | 0 |
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big ( n, prec=1000 ) -> bool:
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d /= 2
        exp += 1
    # n - 1=d*(2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1 )
        b = bin_exp_mod(a, d, n )
        if b != 1:
            flag = True
            for _ in range(exp ):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
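# Example: Miller-Rabin never rejects a true prime, so
#
#   >>> is_prime_big(97)
#   True
#
# while a composite such as 561 (a Carmichael number, which fools the plain
# Fermat test) is rejected with overwhelming probability at prec=1000.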
if __name__ == "__main__":
    n = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 364 | from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _snake_case ( _lowercase ):
def __init__( self: Optional[Any] , __lowerCamelCase: NestedDataStructureLike[PathLike] , __lowerCamelCase: Optional[NamedSplit] = None , __lowerCamelCase: Optional[Features] = None , __lowerCamelCase: str = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: Optional[int] = None , **__lowerCamelCase: Tuple , ) -> str:
super().__init__(
__lowerCamelCase , split=__lowerCamelCase , features=__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase , streaming=__lowerCamelCase , num_proc=__lowerCamelCase , **__lowerCamelCase , )
__UpperCAmelCase : Union[str, Any] = path_or_paths if isinstance(__lowerCamelCase , __lowerCamelCase ) else {self.split: path_or_paths}
__UpperCAmelCase : int = Text(
cache_dir=__lowerCamelCase , data_files=__lowerCamelCase , features=__lowerCamelCase , **__lowerCamelCase , )
def _lowerCamelCase ( self: List[Any] ) -> Optional[Any]:
# Build iterable dataset
if self.streaming:
__UpperCAmelCase : List[str] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__UpperCAmelCase : Any = None
__UpperCAmelCase : Any = None
__UpperCAmelCase : Dict = None
__UpperCAmelCase : str = None
self.builder.download_and_prepare(
download_config=__lowerCamelCase , download_mode=__lowerCamelCase , verification_mode=__lowerCamelCase , base_path=__lowerCamelCase , num_proc=self.num_proc , )
__UpperCAmelCase : Dict = self.builder.as_dataset(
split=self.split , verification_mode=__lowerCamelCase , in_memory=self.keep_in_memory )
return dataset
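# A hedged usage sketch (this reader backs the "text" builder normally reached
# through load_dataset; the file name is a placeholder):
#
#   from datasets import load_dataset
#   ds = load_dataset("text", data_files={"train": "corpus.txt"})["train"]
#   ds[0]   # -> {"text": "<first line of corpus.txt>"}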
| 342 | 0 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function ( z ):
    return 1 / (1 + np.exp(-z ))
def cost_function ( h, y ):
    return (-y * np.log(h ) - (1 - y) * np.log(1 - h )).mean()
def log_likelihood ( x, y, weights ):
    scores = np.dot(x, weights )
    return np.sum(y * scores - np.log(1 + np.exp(scores ) ) )
def logistic_reg ( alpha, x, y, max_iterations=7_0000 ):
    theta = np.zeros(x.shape[1] )
    for iterations in range(max_iterations ):
        z = np.dot(x, theta )
        h = sigmoid_function(z )
        gradient = np.dot(x.T, h - y ) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta )
        h = sigmoid_function(z )
        j = cost_function(h, y )
        if iterations % 100 == 0:
            print(f'''loss: {j} \t''' )  # printing the loss after every 100 iterations
    return theta
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print('''theta: ''', theta)  # printing the theta i.e our weights vector
    def predict_prob ( x ):
        return sigmoid_function(
            np.dot(x, theta ) )  # predicting the value of probability from the logistic regression algorithm
    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
    (xa_min, xa_max) = (x[:, 0].min(), x[:, 0].max())
    (xb_min, xb_max) = (x[:, 1].min(), x[:, 1].max())
    (xxa, xxb) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xb_min, xb_max))
    grid = np.c_[xxa.ravel(), xxb.ravel()]
    probs = predict_prob(grid).reshape(xxa.shape)
    plt.contour(xxa, xxb, probs, [0.5], linewidths=1, colors='''black''')
    plt.legend()
    plt.show()
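# A quick sanity check on the pieces above: the sigmoid squashes any real score
# into (0, 1), e.g.
#
#   >>> sigmoid_function(0.0)
#   0.5
#
# and the update in logistic_reg uses the standard cross-entropy gradient
# dJ/dtheta = x.T @ (h - y) / m.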
| 365 | from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
'''configuration_trajectory_transformer''': [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TrajectoryTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrajectoryTransformerModel''',
'''TrajectoryTransformerPreTrainedModel''',
'''load_tf_weights_in_trajectory_transformer''',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 342 | 0 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_snake_case = '''true'''
def _UpperCamelCase ( snake_case__, snake_case__=82, snake_case__=16 ) -> Union[str, Any]:
set_seed(42 )
__UpperCAmelCase : Dict = RegressionModel()
__UpperCAmelCase : Any = deepcopy(SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase : Dict = RegressionDataset(length=SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase : Optional[int] = DataLoader(SCREAMING_SNAKE_CASE_, batch_size=SCREAMING_SNAKE_CASE_ )
model.to(accelerator.device )
__UpperCAmelCase , __UpperCAmelCase : List[str] = accelerator.prepare(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
return model, ddp_model, dataloader
def _UpperCamelCase ( snake_case__, snake_case__=False ) -> Union[str, Any]:
__UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" )
__UpperCAmelCase : Optional[Any] = load_dataset("glue", "mrpc", split="validation" )
def tokenize_function(snake_case__ ):
__UpperCAmelCase : Union[str, Any] = tokenizer(examples["sentence1"], examples["sentence2"], truncation=SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_ )
return outputs
with accelerator.main_process_first():
__UpperCAmelCase : str = dataset.map(
SCREAMING_SNAKE_CASE_, batched=SCREAMING_SNAKE_CASE_, remove_columns=["idx", "sentence1", "sentence2"], )
__UpperCAmelCase : Any = tokenized_datasets.rename_column("label", "labels" )
def collate_fn(snake_case__ ):
if use_longest:
return tokenizer.pad(SCREAMING_SNAKE_CASE_, padding="longest", return_tensors="pt" )
return tokenizer.pad(SCREAMING_SNAKE_CASE_, padding="max_length", max_length=128, return_tensors="pt" )
return DataLoader(SCREAMING_SNAKE_CASE_, shuffle=SCREAMING_SNAKE_CASE_, collate_fn=SCREAMING_SNAKE_CASE_, batch_size=16 )
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Dict:
__UpperCAmelCase : int = Accelerator(dispatch_batches=SCREAMING_SNAKE_CASE_, split_batches=SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase : Union[str, Any] = get_dataloader(SCREAMING_SNAKE_CASE_, not dispatch_batches )
__UpperCAmelCase : Tuple = AutoModelForSequenceClassification.from_pretrained(
"hf-internal-testing/mrpc-bert-base-cased", return_dict=SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase , __UpperCAmelCase : Dict = accelerator.prepare(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Dict:
__UpperCAmelCase : List[str] = []
for batch in dataloader:
__UpperCAmelCase , __UpperCAmelCase : int = batch.values()
with torch.no_grad():
__UpperCAmelCase : Dict = model(input )
__UpperCAmelCase , __UpperCAmelCase : List[Any] = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
__UpperCAmelCase , __UpperCAmelCase : List[Any] = [], []
for logit, targ in logits_and_targets:
logits.append(logit )
targs.append(targ )
__UpperCAmelCase , __UpperCAmelCase : Tuple = torch.cat(logits ), torch.cat(targs )
return logits, targs
def _UpperCamelCase ( snake_case__, snake_case__=82, snake_case__=False, snake_case__=False, snake_case__=16 ) -> List[str]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = get_basic_setup(accelerator, num_samples, batch_size )
__UpperCAmelCase , __UpperCAmelCase : List[Any] = generate_predictions(ddp_model, dataloader, accelerator )
assert (
len(logits ) == num_samples
), f'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(logits )}'''
def _UpperCamelCase ( snake_case__ = False, snake_case__ = False ) -> Optional[int]:
__UpperCAmelCase : List[Any] = evaluate.load("glue", "mrpc" )
__UpperCAmelCase , __UpperCAmelCase : Any = get_mrpc_setup(dispatch_batches, split_batches )
# First do baseline
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = setup["no"]
model.to(device )
model.eval()
for batch in dataloader:
batch.to(device )
with torch.inference_mode():
__UpperCAmelCase : Optional[Any] = model(**batch )
__UpperCAmelCase : str = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=preds, references=batch["labels"] )
__UpperCAmelCase : Optional[int] = metric.compute()
# Then do distributed
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = setup["ddp"]
model.eval()
for batch in dataloader:
with torch.inference_mode():
__UpperCAmelCase : Optional[int] = model(**batch )
__UpperCAmelCase : Tuple = outputs.logits.argmax(dim=-1 )
__UpperCAmelCase : Any = batch["labels"]
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=preds, references=references )
__UpperCAmelCase : Any = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key], distributed[key] ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def _UpperCamelCase ( ) -> Dict:
__UpperCAmelCase : List[Any] = Accelerator(split_batches=False, dispatch_batches=False )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("**Testing gather_for_metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(dispatch_batches, split_batches )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test torch metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
__UpperCAmelCase : int = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches )
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(accelerator, 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test last batch is not dropped when perfectly divisible**" )
__UpperCAmelCase : Optional[int] = Accelerator()
test_torch_metrics(accelerator, 512 )
accelerator.state._reset_state()
def _UpperCamelCase ( snake_case__ ) -> Union[str, Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
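# --- Illustrative sketch (not part of the original test file) ---
# The pattern the tests above exercise is `Accelerator.gather_for_metrics`, which
# gathers predictions from all processes and drops the samples that were duplicated
# to pad the last batch. A minimal, hedged usage sketch (model/dataloader assumed):
#
# accelerator = Accelerator()
# model, dataloader = accelerator.prepare(model, dataloader)
# all_preds, all_labels = [], []
# for batch in dataloader:
#     with torch.no_grad():
#         logits = model(**batch).logits
#     preds, labels = accelerator.gather_for_metrics((logits.argmax(dim=-1), batch["labels"]))
#     all_preds.append(preds)
#     all_labels.append(labels)
# # torch.cat(all_preds) now has exactly len(dataloader.dataset) rows on every process.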
| 366 | import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
def __init__( self: Tuple , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any]=13 , __lowerCamelCase: Optional[int]=32 , __lowerCamelCase: List[str]=3 , __lowerCamelCase: Dict=4 , __lowerCamelCase: Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase: int=[2, 2, 3, 2] , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Tuple=37 , __lowerCamelCase: Tuple="gelu" , __lowerCamelCase: List[Any]=10 , __lowerCamelCase: Optional[int]=0.02 , __lowerCamelCase: Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase: Optional[int]=[2, 3, 4] , __lowerCamelCase: int=None , ) -> List[str]:
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : Optional[int] = image_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : Union[str, Any] = num_stages
__UpperCAmelCase : List[str] = hidden_sizes
__UpperCAmelCase : Any = depths
__UpperCAmelCase : Optional[int] = is_training
__UpperCAmelCase : List[Any] = use_labels
__UpperCAmelCase : Optional[int] = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Union[str, Any] = num_labels
__UpperCAmelCase : Any = initializer_range
__UpperCAmelCase : List[str] = out_features
__UpperCAmelCase : Tuple = out_indices
__UpperCAmelCase : List[Any] = scope
def _lowerCamelCase ( self: List[Any] ) -> Optional[int]:
__UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : List[str] = None
if self.use_labels:
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self: Tuple ) -> List[Any]:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase : Optional[Any] = ConvNextVaModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : List[str] = model(__lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: Tuple ) -> Tuple:
__UpperCAmelCase : Union[str, Any] = ConvNextVaForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Optional[int] = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self: int , __lowerCamelCase: Any , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Any = model(__lowerCamelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__UpperCAmelCase : List[Any] = None
__UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Any = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowerCamelCase ( self: int ) -> List[str]:
__UpperCAmelCase : int = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs
__UpperCAmelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs
__UpperCAmelCase : Dict = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class _snake_case ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowerCamelCase__: Dict = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__: str = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__: Tuple = False
lowerCamelCase__: int = False
lowerCamelCase__: Dict = False
lowerCamelCase__: int = False
lowerCamelCase__: Any = False
def _lowerCamelCase ( self: Union[str, Any] ) -> Union[str, Any]:
__UpperCAmelCase : Union[str, Any] = ConvNextVaModelTester(self )
__UpperCAmelCase : str = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def _lowerCamelCase ( self: Dict ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self: List[Any] ) -> int:
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def _lowerCamelCase ( self: Any ) -> Any:
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def _lowerCamelCase ( self: str ) -> Optional[Any]:
pass
def _lowerCamelCase ( self: List[Any] ) -> int:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_with_labels()
__UpperCAmelCase : Optional[Any] = True
if model_class.__name__ in [
*get_values(MODEL_MAPPING_NAMES ),
*get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
]:
continue
__UpperCAmelCase : Optional[Any] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
__UpperCAmelCase : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
__UpperCAmelCase : Any = model(**__lowerCamelCase ).loss
loss.backward()
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = True
if (
model_class.__name__
in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
or not model_class.supports_gradient_checkpointing
):
continue
__UpperCAmelCase : int = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.gradient_checkpointing_enable()
model.train()
__UpperCAmelCase : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
__UpperCAmelCase : Any = model(**__lowerCamelCase ).loss
loss.backward()
def _lowerCamelCase ( self: List[str] ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : str = model_class(__lowerCamelCase )
__UpperCAmelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : List[Any] = [*signature.parameters.keys()]
__UpperCAmelCase : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _lowerCamelCase ( self: str ) -> List[Any]:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowerCamelCase ( self: Union[str, Any] ) -> Dict:
def check_hidden_states_output(__lowerCamelCase: Any , __lowerCamelCase: Tuple , __lowerCamelCase: str ):
__UpperCAmelCase : Any = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Tuple = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
__UpperCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[int] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Any = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def _lowerCamelCase ( self: Dict ) -> List[Any]:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Optional[int] = ConvNextVaModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def _UpperCamelCase ( ) -> List[Any]:
__UpperCAmelCase : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
__UpperCAmelCase : List[Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCamelCase )
__UpperCAmelCase : List[str] = self.default_image_processor
__UpperCAmelCase : Optional[Any] = prepare_img()
__UpperCAmelCase : int = preprocessor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
__UpperCAmelCase : str = model(**__lowerCamelCase )
# verify the logits
__UpperCAmelCase : Dict = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
__UpperCAmelCase : str = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
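# --- Illustrative sketch (not part of the original test file) ---
# The integration test above corresponds to this standalone inference snippet; the
# checkpoint name is taken from the test, everything else is standard usage.
#
# from transformers import AutoImageProcessor, ConvNextV2ForImageClassification
#
# processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
# model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
# inputs = processor(images=prepare_img(), return_tensors="pt")
# with torch.no_grad():
#     logits = model(**inputs).logits
# print(model.config.id2label[logits.argmax(-1).item()])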
| 342 | 0 |
from __future__ import annotations
import time
import numpy as np
_snake_case = [8, 5, 9, 7]
_snake_case = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
_snake_case = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class _snake_case :
def __init__( self: int , __lowerCamelCase: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: int , ) -> Tuple:
__UpperCAmelCase : List[Any] = claim_vector
__UpperCAmelCase : Tuple = allocated_resources_table
__UpperCAmelCase : Any = maximum_claim_table
def _lowerCamelCase ( self: List[Any] ) -> Dict:
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def _lowerCamelCase ( self: Dict ) -> int:
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def _lowerCamelCase ( self: Optional[int] ) -> Any:
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def _lowerCamelCase ( self: List[Any] ) -> List[str]:
return {self.__need().index(i ): i for i in self.__need()}
def _lowerCamelCase ( self: List[Any] , **__lowerCamelCase: Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase : List[Any] = self.__need()
__UpperCAmelCase : Optional[Any] = self.__allocated_resources_table
__UpperCAmelCase : Optional[int] = self.__available_resources()
__UpperCAmelCase : Union[str, Any] = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("_" * 50 + "\n" )
while need_list:
__UpperCAmelCase : List[Any] = False
for each_need in need_list:
__UpperCAmelCase : List[Any] = True
for index, need in enumerate(each_need ):
if need > available_resources[index]:
__UpperCAmelCase : List[str] = False
break
if execution:
__UpperCAmelCase : int = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
__UpperCAmelCase : Optional[Any] = original_need_index
print(f'''Process {process_number + 1} is executing.''' )
# remove the process run from stack
need_list.remove(each_need )
# update available/freed resources stack
__UpperCAmelCase : Union[str, Any] = np.array(available_resources ) + np.array(
alloc_resources_table[process_number] )
print(
"Updated available resource stack for processes: "
+ " ".join([str(__lowerCamelCase ) for x in available_resources] ) )
break
if safe:
print("The process is in a safe state.\n" )
else:
print("System in unsafe state. Aborting...\n" )
break
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[Any]:
print(" " * 9 + "Allocated Resource Table" )
for item in self.__allocated_resources_table:
print(
f'''P{self.__allocated_resources_table.index(item ) + 1}'''
+ " ".join(f'''{it:>8}''' for it in item )
+ "\n" )
print(" " * 9 + "System Resource Table" )
for item in self.__maximum_claim_table:
print(
f'''P{self.__maximum_claim_table.index(item ) + 1}'''
+ " ".join(f'''{it:>8}''' for it in item )
+ "\n" )
print(
"Current Usage by Active Processes: "
+ " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) )
print(
"Initial Available Resources: "
+ " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
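# --- Illustrative usage (not part of the original module) ---
# Running the Banker's safety check on the sample vectors defined at the top of the
# file; they are inlined here because the module-level names above all share one
# obfuscated identifier. `describe=True` triggers the pretty-printing branch of `main`.
#
# bankers = _snake_case(
#     [8, 5, 9, 7],                                                            # claim vector
#     [[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0]],  # allocated resources
#     [[3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3]],  # maximum claims
# )
# bankers.main(describe=True)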
| 367 | import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _snake_case ( PretrainedConfig ):
lowerCamelCase__: str = "detr"
lowerCamelCase__: Dict = ["past_key_values"]
lowerCamelCase__: str = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self: List[str] , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Any=None , __lowerCamelCase: Dict=3 , __lowerCamelCase: str=1_00 , __lowerCamelCase: Union[str, Any]=6 , __lowerCamelCase: Union[str, Any]=20_48 , __lowerCamelCase: Dict=8 , __lowerCamelCase: Optional[int]=6 , __lowerCamelCase: List[Any]=20_48 , __lowerCamelCase: int=8 , __lowerCamelCase: Tuple=0.0 , __lowerCamelCase: Dict=0.0 , __lowerCamelCase: Any=True , __lowerCamelCase: Tuple="relu" , __lowerCamelCase: Tuple=2_56 , __lowerCamelCase: Dict=0.1 , __lowerCamelCase: Union[str, Any]=0.0 , __lowerCamelCase: Optional[int]=0.0 , __lowerCamelCase: Union[str, Any]=0.02 , __lowerCamelCase: str=1.0 , __lowerCamelCase: List[str]=False , __lowerCamelCase: Dict="sine" , __lowerCamelCase: Optional[int]="resnet50" , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: int=False , __lowerCamelCase: Union[str, Any]=1 , __lowerCamelCase: Tuple=5 , __lowerCamelCase: int=2 , __lowerCamelCase: Dict=1 , __lowerCamelCase: Dict=1 , __lowerCamelCase: Union[str, Any]=5 , __lowerCamelCase: Dict=2 , __lowerCamelCase: int=0.1 , **__lowerCamelCase: str , ) -> int:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__UpperCAmelCase : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : List[Any] = backbone_config.get("model_type" )
__UpperCAmelCase : List[str] = CONFIG_MAPPING[backbone_model_type]
__UpperCAmelCase : List[str] = config_class.from_dict(__lowerCamelCase )
# set timm attributes to None
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = None, None, None
__UpperCAmelCase : Any = use_timm_backbone
__UpperCAmelCase : Optional[Any] = backbone_config
__UpperCAmelCase : Optional[Any] = num_channels
__UpperCAmelCase : List[Any] = num_queries
__UpperCAmelCase : Optional[int] = d_model
__UpperCAmelCase : Optional[Any] = encoder_ffn_dim
__UpperCAmelCase : Dict = encoder_layers
__UpperCAmelCase : List[Any] = encoder_attention_heads
__UpperCAmelCase : int = decoder_ffn_dim
__UpperCAmelCase : Tuple = decoder_layers
__UpperCAmelCase : int = decoder_attention_heads
__UpperCAmelCase : List[Any] = dropout
__UpperCAmelCase : Dict = attention_dropout
__UpperCAmelCase : Optional[Any] = activation_dropout
__UpperCAmelCase : int = activation_function
__UpperCAmelCase : Any = init_std
__UpperCAmelCase : str = init_xavier_std
__UpperCAmelCase : int = encoder_layerdrop
__UpperCAmelCase : Tuple = decoder_layerdrop
__UpperCAmelCase : List[Any] = encoder_layers
__UpperCAmelCase : Optional[Any] = auxiliary_loss
__UpperCAmelCase : int = position_embedding_type
__UpperCAmelCase : Optional[int] = backbone
__UpperCAmelCase : str = use_pretrained_backbone
__UpperCAmelCase : Dict = dilation
# Hungarian matcher
__UpperCAmelCase : Optional[int] = class_cost
__UpperCAmelCase : Optional[Any] = bbox_cost
__UpperCAmelCase : Optional[int] = giou_cost
# Loss coefficients
__UpperCAmelCase : Any = mask_loss_coefficient
__UpperCAmelCase : Any = dice_loss_coefficient
__UpperCAmelCase : Any = bbox_loss_coefficient
__UpperCAmelCase : Optional[int] = giou_loss_coefficient
__UpperCAmelCase : Optional[Any] = eos_coefficient
super().__init__(is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase )
@property
def _lowerCamelCase ( self: Dict ) -> int:
return self.encoder_attention_heads
@property
def _lowerCamelCase ( self: str ) -> int:
return self.d_model
@classmethod
def _lowerCamelCase ( cls: Optional[int] , __lowerCamelCase: PretrainedConfig , **__lowerCamelCase: List[Any] ) -> List[Any]:
return cls(backbone_config=__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( self: str ) -> Dict[str, any]:
__UpperCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
__UpperCAmelCase : int = self.backbone_config.to_dict()
__UpperCAmelCase : List[str] = self.__class__.model_type
return output
class _snake_case ( OnnxConfig ):
lowerCamelCase__: Optional[int] = version.parse("1.11" )
@property
def _lowerCamelCase ( self: Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _lowerCamelCase ( self: Optional[Any] ) -> float:
return 1e-5
@property
def _lowerCamelCase ( self: List[str] ) -> int:
return 12
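# --- Illustrative sketch (not part of the original module) ---
# Constructing the config and its ONNX companion defined above. `DetrConfig` and
# `DetrOnnxConfig` are the upstream names assumed for the two classes here.
#
# config = DetrConfig(num_queries=50, encoder_layers=4, decoder_layers=4)
# onnx_config = DetrOnnxConfig(config)
# print(onnx_config.inputs)               # pixel_values / pixel_mask with dynamic batch axis
# print(onnx_config.atol_for_validation)  # 1e-5, per the property above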
| 342 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_snake_case = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
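# --- Illustrative note (not part of the original module) ---
# With the `_LazyModule` pattern above, the heavy submodules are only imported on
# first attribute access, e.g.:
#
# from transformers import EncodecModel, EncodecFeatureExtractor  # triggers the lazy import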
| 368 | from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=1e-1_2 ) -> str:
__UpperCAmelCase : Any = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(snake_case__, axis=1 ), a_min=snake_case__ ) ).T
__UpperCAmelCase : int = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(snake_case__, axis=1 ), a_min=snake_case__ ) ).T
return jnp.matmul(snake_case__, norm_emb_a.T )
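# --- Quick sanity sketch (not part of the original module) ---
# The helper above mirrors the upstream `jax_cosine_distance`: it L2-normalizes each
# row of both inputs and returns their similarity matrix, so for unit vectors it is
# a plain dot product. Intended behaviour, assuming the upstream signature:
#
# a = jnp.array([[1.0, 0.0], [0.0, 1.0]])
# b = jnp.array([[1.0, 0.0]])
# _UpperCamelCase(a, b)  # intended result: [[1.0], [0.0]]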
class _snake_case ( nn.Module ):
lowerCamelCase__: CLIPConfig
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: Any ) -> Tuple:
__UpperCAmelCase : List[str] = FlaxCLIPVisionModule(self.config.vision_config )
__UpperCAmelCase : Any = nn.Dense(self.config.projection_dim , use_bias=__lowerCamelCase , dtype=self.dtype )
__UpperCAmelCase : int = self.param("concept_embeds" , jax.nn.initializers.ones , (17, self.config.projection_dim) )
__UpperCAmelCase : int = self.param(
"special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim) )
__UpperCAmelCase : Tuple = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (17,) )
__UpperCAmelCase : str = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,) )
def __call__( self: List[Any] , __lowerCamelCase: Dict ) -> Dict:
__UpperCAmelCase : Optional[int] = self.vision_model(__lowerCamelCase )[1]
__UpperCAmelCase : List[str] = self.visual_projection(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = jax_cosine_distance(__lowerCamelCase , self.special_care_embeds )
__UpperCAmelCase : Optional[Any] = jax_cosine_distance(__lowerCamelCase , self.concept_embeds )
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign image inputs
__UpperCAmelCase : List[str] = 0.0
__UpperCAmelCase : Tuple = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
__UpperCAmelCase : List[str] = jnp.round(__lowerCamelCase , 3 )
__UpperCAmelCase : Any = jnp.any(special_scores > 0 , axis=1 , keepdims=__lowerCamelCase )
# Use a lower threshold if an image has any special care concept
__UpperCAmelCase : List[Any] = is_special_care * 0.01
__UpperCAmelCase : Any = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
__UpperCAmelCase : List[str] = jnp.round(__lowerCamelCase , 3 )
__UpperCAmelCase : Any = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class _snake_case ( FlaxPreTrainedModel ):
lowerCamelCase__: int = CLIPConfig
lowerCamelCase__: Tuple = "clip_input"
lowerCamelCase__: str = FlaxStableDiffusionSafetyCheckerModule
def __init__( self: Union[str, Any] , __lowerCamelCase: CLIPConfig , __lowerCamelCase: Optional[Tuple] = None , __lowerCamelCase: int = 0 , __lowerCamelCase: jnp.dtype = jnp.floataa , __lowerCamelCase: bool = True , **__lowerCamelCase: Optional[int] , ) -> int:
if input_shape is None:
__UpperCAmelCase : Dict = (1, 2_24, 2_24, 3)
__UpperCAmelCase : Tuple = self.module_class(config=__lowerCamelCase , dtype=__lowerCamelCase , **__lowerCamelCase )
super().__init__(__lowerCamelCase , __lowerCamelCase , input_shape=__lowerCamelCase , seed=__lowerCamelCase , dtype=__lowerCamelCase , _do_init=_do_init )
def _lowerCamelCase ( self: Dict , __lowerCamelCase: jax.random.KeyArray , __lowerCamelCase: Tuple , __lowerCamelCase: FrozenDict = None ) -> FrozenDict:
# init input tensor
__UpperCAmelCase : Tuple = jax.random.normal(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase , __UpperCAmelCase : Dict = jax.random.split(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = {"params": params_rng, "dropout": dropout_rng}
__UpperCAmelCase : str = self.module.init(__lowerCamelCase , __lowerCamelCase )["params"]
return random_params
def __call__( self: Union[str, Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: dict = None , ) -> List[Any]:
__UpperCAmelCase : int = jnp.transpose(__lowerCamelCase , (0, 2, 3, 1) )
return self.module.apply(
{"params": params or self.params} , jnp.array(__lowerCamelCase , dtype=jnp.floataa ) , rngs={} , )
| 342 | 0 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
_snake_case = logging.get_logger(__name__)
_snake_case = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
_snake_case = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def _UpperCamelCase ( snake_case__ ) -> List[str]:
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
__UpperCAmelCase : Optional[int] = model_type_to_module_name(class_name )
__UpperCAmelCase : Union[str, Any] = importlib.import_module(f'''.{module_name}''', "transformers.models" )
try:
return getattr(module, class_name )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(extractor, "__name__", None ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
__UpperCAmelCase : Union[str, Any] = importlib.import_module("transformers" )
if hasattr(main_module, class_name ):
return getattr(main_module, class_name )
return None
def _UpperCamelCase ( snake_case__, snake_case__ = None, snake_case__ = False, snake_case__ = False, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = False, **snake_case__, ) -> Union[str, Any]:
__UpperCAmelCase : List[Any] = get_file_from_repo(
pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
if resolved_config_file is None:
logger.info(
"Could not locate the feature extractor configuration file, will try to use the model config instead." )
return {}
with open(resolved_config_file, encoding="utf-8" ) as reader:
return json.load(reader )
class _snake_case :
def __init__( self: Optional[Any] ) -> Dict:
raise EnvironmentError(
"AutoFeatureExtractor is designed to be instantiated "
"using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method." )
@classmethod
@replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES )
def _lowerCamelCase ( cls: Optional[int] , __lowerCamelCase: Dict , **__lowerCamelCase: Any ) -> str:
__UpperCAmelCase : Optional[Any] = kwargs.pop("config" , None )
__UpperCAmelCase : List[Any] = kwargs.pop("trust_remote_code" , None )
__UpperCAmelCase : List[Any] = True
__UpperCAmelCase : List[Any] = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path , **kwargs )
__UpperCAmelCase : Optional[Any] = config_dict.get("feature_extractor_type" , None )
__UpperCAmelCase : Tuple = None
if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ):
__UpperCAmelCase : int = config_dict["auto_map"]["AutoFeatureExtractor"]
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(config , PretrainedConfig ):
__UpperCAmelCase : Dict = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs )
# It could be in `config.feature_extractor_type`
__UpperCAmelCase : Union[str, Any] = getattr(config , "feature_extractor_type" , None )
if hasattr(config , "auto_map" ) and "AutoFeatureExtractor" in config.auto_map:
__UpperCAmelCase : Tuple = config.auto_map["AutoFeatureExtractor"]
if feature_extractor_class is not None:
__UpperCAmelCase : List[str] = feature_extractor_class_from_name(feature_extractor_class )
__UpperCAmelCase : Dict = feature_extractor_auto_map is not None
__UpperCAmelCase : str = feature_extractor_class is not None or type(config ) in FEATURE_EXTRACTOR_MAPPING
__UpperCAmelCase : Dict = resolve_trust_remote_code(
trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code )
if has_remote_code and trust_remote_code:
__UpperCAmelCase : Union[str, Any] = get_class_from_dynamic_module(
feature_extractor_auto_map , pretrained_model_name_or_path , **kwargs )
__UpperCAmelCase : Union[str, Any] = kwargs.pop("code_revision" , None )
if os.path.isdir(pretrained_model_name_or_path ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(config_dict , **kwargs )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(config_dict , **kwargs )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(config ) in FEATURE_EXTRACTOR_MAPPING:
__UpperCAmelCase : Dict = FEATURE_EXTRACTOR_MAPPING[type(config )]
return feature_extractor_class.from_dict(config_dict , **kwargs )
raise ValueError(
f'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
f'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def _lowerCamelCase ( __lowerCamelCase: Any , __lowerCamelCase: Tuple ) -> List[str]:
FEATURE_EXTRACTOR_MAPPING.register(config_class , feature_extractor_class )
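# --- Illustrative usage (not part of the original module) ---
# Resolving an extractor from a checkpoint name; the checkpoint id is an example,
# any audio or vision model id covered by the mapping above works.
#
# from transformers import AutoFeatureExtractor
#
# extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
# print(type(extractor).__name__)  # Wav2Vec2FeatureExtractor, per FEATURE_EXTRACTOR_MAPPING_NAMES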
| 369 | import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def _UpperCamelCase ( snake_case__ ) -> Tuple:
__UpperCAmelCase : Union[str, Any] = 384
if "tiny" in model_name:
__UpperCAmelCase : Union[str, Any] = [3, 3, 9, 3]
__UpperCAmelCase : List[Any] = [96, 192, 384, 768]
if "small" in model_name:
__UpperCAmelCase : Tuple = [3, 3, 27, 3]
__UpperCAmelCase : Any = [96, 192, 384, 768]
if "base" in model_name:
__UpperCAmelCase : str = [3, 3, 27, 3]
__UpperCAmelCase : str = [128, 256, 512, 1024]
__UpperCAmelCase : str = 512
if "large" in model_name:
__UpperCAmelCase : Dict = [3, 3, 27, 3]
__UpperCAmelCase : int = [192, 384, 768, 1536]
__UpperCAmelCase : Dict = 768
if "xlarge" in model_name:
__UpperCAmelCase : List[Any] = [3, 3, 27, 3]
__UpperCAmelCase : Tuple = [256, 512, 1024, 2048]
__UpperCAmelCase : int = 1024
# set label information
__UpperCAmelCase : List[Any] = 150
__UpperCAmelCase : str = "huggingface/label-files"
__UpperCAmelCase : List[Any] = "ade20k-id2label.json"
__UpperCAmelCase : str = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) )
__UpperCAmelCase : str = {int(snake_case__ ): v for k, v in idalabel.items()}
__UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()}
__UpperCAmelCase : int = ConvNextConfig(
depths=snake_case__, hidden_sizes=snake_case__, out_features=["stage1", "stage2", "stage3", "stage4"] )
__UpperCAmelCase : int = UperNetConfig(
backbone_config=snake_case__, auxiliary_in_channels=snake_case__, num_labels=snake_case__, idalabel=snake_case__, labelaid=snake_case__, )
return config
def _UpperCamelCase ( snake_case__ ) -> Tuple:
__UpperCAmelCase : Optional[int] = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Any:
__UpperCAmelCase : Union[str, Any] = dct.pop(snake_case__ )
__UpperCAmelCase : Optional[int] = val
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Union[str, Any]:
__UpperCAmelCase : Dict = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
__UpperCAmelCase : Union[str, Any] = model_name_to_url[model_name]
__UpperCAmelCase : str = torch.hub.load_state_dict_from_url(snake_case__, map_location="cpu" )["state_dict"]
__UpperCAmelCase : Dict = get_upernet_config(snake_case__ )
__UpperCAmelCase : str = UperNetForSemanticSegmentation(snake_case__ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
__UpperCAmelCase : str = state_dict.pop(key )
if "bn" in key:
__UpperCAmelCase : int = key.replace("bn", "batch_norm" )
__UpperCAmelCase : Union[str, Any] = val
# rename keys
__UpperCAmelCase : Optional[Any] = create_rename_keys(snake_case__ )
for src, dest in rename_keys:
rename_key(state_dict, src, dest )
model.load_state_dict(state_dict )
# verify on image
__UpperCAmelCase : int = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
__UpperCAmelCase : Optional[int] = Image.open(requests.get(url, stream=True ).raw ).convert("RGB" )
__UpperCAmelCase : str = SegformerImageProcessor()
__UpperCAmelCase : Any = processor(snake_case__, return_tensors="pt" ).pixel_values
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(pixel_values )
if model_name == "upernet-convnext-tiny":
__UpperCAmelCase : Any = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
__UpperCAmelCase : Optional[Any] = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
__UpperCAmelCase : Dict = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
__UpperCAmelCase : Tuple = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
__UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print("Logits:", outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case__ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(snake_case__ )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[F'upernet-convnext-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_snake_case = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
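# --- Illustrative invocation (not part of the original script) ---
# The script filename and output path below are assumptions:
#
#   python convert_convnext_upernet_to_pytorch.py \
#       --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny \
#       --push_to_hub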
| 342 | 0 |
from __future__ import annotations
from collections.abc import MutableSequence
class _snake_case :
def __init__( self: Optional[Any] , __lowerCamelCase: int , __lowerCamelCase: MutableSequence[float] ) -> Dict:
if len(coefficients ) != degree + 1:
raise ValueError(
"The number of coefficients should be equal to the degree + 1." )
__UpperCAmelCase : list[float] = list(coefficients )
__UpperCAmelCase : Dict = degree
def __add__( self: int , __lowerCamelCase: Polynomial ) -> Optional[int]:
if self.degree > polynomial_a.degree:
__UpperCAmelCase : Optional[int] = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , coefficients )
else:
__UpperCAmelCase : List[Any] = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , coefficients )
def __sub__( self: int , __lowerCamelCase: Polynomial ) -> Any:
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self: Tuple ) -> Union[str, Any]:
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self: Optional[Any] , __lowerCamelCase: Polynomial ) -> Any:
__UpperCAmelCase : list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , coefficients )
def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: int | float ) -> int:
__UpperCAmelCase : int | float = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self: Optional[int] ) -> Dict:
__UpperCAmelCase : Optional[int] = ""
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(_a )
return polynomial
def __repr__( self: int ) -> List[str]:
return self.__str__()
def _lowerCamelCase ( self: Optional[int] ) -> List[Any]:
__UpperCAmelCase : list[float] = [0] * self.degree
for i in range(self.degree ):
__UpperCAmelCase : List[Any] = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , coefficients )
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: int | float = 0 ) -> Union[str, Any]:
__UpperCAmelCase : list[float] = [0] * (self.degree + 2)
__UpperCAmelCase : List[str] = constant
for i in range(self.degree + 1 ):
__UpperCAmelCase : Tuple = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , coefficients )
def __eq__( self: Union[str, Any] , __lowerCamelCase: object ) -> Optional[int]:
if not isinstance(polynomial_a , Polynomial ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self: List[Any] , __lowerCamelCase: object ) -> Optional[int]:
return not self.__eq__(polynomial_a )
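# --- Illustrative usage (not part of the original module) ---
# The class above is the classic dense polynomial; `Polynomial` is the name its own
# methods already use internally, and the method names below are the upstream ones
# (the obfuscation collapsed them all to `_lowerCamelCase`), so treat this as a
# sketch of intended behaviour rather than runnable code against this file.
#
# p = Polynomial(2, [1, 2, 3])   # represents 3x^2 + 2x + 1
# p.evaluate(2)                  # 1 + 2*2 + 3*4 = 17
# p.derivative()                 # 6x + 2
# p.integral(constant=5)         # 1.0x^3 + 1.0x^2 + 1.0x + 5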
| 370 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class _snake_case ( PretrainedConfig ):
lowerCamelCase__: Dict = "roc_bert"
def __init__( self: int , __lowerCamelCase: Union[str, Any]=3_05_22 , __lowerCamelCase: int=7_68 , __lowerCamelCase: Any=12 , __lowerCamelCase: int=12 , __lowerCamelCase: Union[str, Any]=30_72 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: Optional[int]=0.1 , __lowerCamelCase: str=0.1 , __lowerCamelCase: Any=5_12 , __lowerCamelCase: Union[str, Any]=2 , __lowerCamelCase: str=0.02 , __lowerCamelCase: int=1e-12 , __lowerCamelCase: str=True , __lowerCamelCase: int=0 , __lowerCamelCase: List[str]="absolute" , __lowerCamelCase: List[Any]=None , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: Dict=7_68 , __lowerCamelCase: Optional[int]=9_10 , __lowerCamelCase: Union[str, Any]=5_12 , __lowerCamelCase: int=2_48_58 , __lowerCamelCase: Optional[int]=True , **__lowerCamelCase: Any , ) -> List[Any]:
__UpperCAmelCase : str = vocab_size
__UpperCAmelCase : Dict = max_position_embeddings
__UpperCAmelCase : Optional[Any] = hidden_size
__UpperCAmelCase : Optional[int] = num_hidden_layers
__UpperCAmelCase : Union[str, Any] = num_attention_heads
__UpperCAmelCase : List[str] = intermediate_size
__UpperCAmelCase : List[Any] = hidden_act
__UpperCAmelCase : List[str] = hidden_dropout_prob
__UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : Optional[Any] = type_vocab_size
__UpperCAmelCase : List[Any] = layer_norm_eps
__UpperCAmelCase : Optional[int] = use_cache
__UpperCAmelCase : Optional[Any] = enable_pronunciation
__UpperCAmelCase : Any = enable_shape
__UpperCAmelCase : Union[str, Any] = pronunciation_embed_dim
__UpperCAmelCase : Optional[Any] = pronunciation_vocab_size
__UpperCAmelCase : Optional[Any] = shape_embed_dim
__UpperCAmelCase : List[Any] = shape_vocab_size
__UpperCAmelCase : int = concat_input
__UpperCAmelCase : int = position_embedding_type
__UpperCAmelCase : Optional[int] = classifier_dropout
super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase )
| 342 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
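# --- Worked example (not part of the original module) ---
# A 10 mH inductor at 50 Hz: X_L = 2 * pi * f * L = 2 * pi * 50 * 0.01 ≈ 3.1416 ohm.
# Positional arguments, matching (inductance, frequency, reactance) above:
#
# _UpperCamelCase(0.01, 50, 0)
# # -> {'reactance': 3.141592653589793}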
| 371 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def _UpperCamelCase ( snake_case__ ) -> int:
__UpperCAmelCase : int = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
__UpperCAmelCase : int = [144, 192, 240]
__UpperCAmelCase : Optional[Any] = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
__UpperCAmelCase : Optional[Any] = [96, 120, 144]
__UpperCAmelCase : Tuple = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
__UpperCAmelCase : str = [64, 80, 96]
__UpperCAmelCase : Optional[Any] = [16, 16, 24, 48, 64, 80, 320]
__UpperCAmelCase : Tuple = 0.05
__UpperCAmelCase : Dict = 2.0
if mobilevit_name.startswith("deeplabv3_" ):
__UpperCAmelCase : str = 512
__UpperCAmelCase : Any = 16
__UpperCAmelCase : str = 21
__UpperCAmelCase : Union[str, Any] = "pascal-voc-id2label.json"
else:
__UpperCAmelCase : Optional[Any] = 1000
__UpperCAmelCase : int = "imagenet-1k-id2label.json"
__UpperCAmelCase : Dict = "huggingface/label-files"
__UpperCAmelCase : int = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) )
__UpperCAmelCase : Any = {int(snake_case__ ): v for k, v in idalabel.items()}
__UpperCAmelCase : int = idalabel
__UpperCAmelCase : List[str] = {v: k for k, v in idalabel.items()}
return config
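# Hedged summary of the branches above: "deeplabv3_*" checkpoints are configured
# for semantic segmentation (21 PASCAL VOC labels, image size 512, output
# stride 16), while plain "mobilevit_*" checkpoints get 1000 ImageNet-1k labels;
# both label maps are fetched from the huggingface/label-files dataset repo.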
def _UpperCamelCase ( snake_case__, snake_case__=False ) -> Tuple:
for i in range(1, 6 ):
if f'''layer_{i}.''' in name:
__UpperCAmelCase : Tuple = name.replace(f'''layer_{i}.''', f'''encoder.layer.{i - 1}.''' )
if "conv_1." in name:
__UpperCAmelCase : Dict = name.replace("conv_1.", "conv_stem." )
if ".block." in name:
__UpperCAmelCase : Optional[int] = name.replace(".block.", "." )
if "exp_1x1" in name:
__UpperCAmelCase : Tuple = name.replace("exp_1x1", "expand_1x1" )
if "red_1x1" in name:
__UpperCAmelCase : Optional[Any] = name.replace("red_1x1", "reduce_1x1" )
if ".local_rep.conv_3x3." in name:
__UpperCAmelCase : Optional[int] = name.replace(".local_rep.conv_3x3.", ".conv_kxk." )
if ".local_rep.conv_1x1." in name:
__UpperCAmelCase : Any = name.replace(".local_rep.conv_1x1.", ".conv_1x1." )
if ".norm." in name:
__UpperCAmelCase : Dict = name.replace(".norm.", ".normalization." )
if ".conv." in name:
__UpperCAmelCase : List[Any] = name.replace(".conv.", ".convolution." )
if ".conv_proj." in name:
__UpperCAmelCase : List[str] = name.replace(".conv_proj.", ".conv_projection." )
for i in range(0, 2 ):
for j in range(0, 4 ):
if f'''.{i}.{j}.''' in name:
__UpperCAmelCase : List[Any] = name.replace(f'''.{i}.{j}.''', f'''.{i}.layer.{j}.''' )
for i in range(2, 6 ):
for j in range(0, 4 ):
if f'''.{i}.{j}.''' in name:
__UpperCAmelCase : Any = name.replace(f'''.{i}.{j}.''', f'''.{i}.''' )
if "expand_1x1" in name:
__UpperCAmelCase : Optional[int] = name.replace("expand_1x1", "downsampling_layer.expand_1x1" )
if "conv_3x3" in name:
__UpperCAmelCase : List[Any] = name.replace("conv_3x3", "downsampling_layer.conv_3x3" )
if "reduce_1x1" in name:
__UpperCAmelCase : Dict = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1" )
for i in range(2, 5 ):
if f'''.global_rep.{i}.weight''' in name:
__UpperCAmelCase : Any = name.replace(f'''.global_rep.{i}.weight''', ".layernorm.weight" )
if f'''.global_rep.{i}.bias''' in name:
__UpperCAmelCase : Optional[Any] = name.replace(f'''.global_rep.{i}.bias''', ".layernorm.bias" )
if ".global_rep." in name:
__UpperCAmelCase : Tuple = name.replace(".global_rep.", ".transformer." )
if ".pre_norm_mha.0." in name:
__UpperCAmelCase : Optional[Any] = name.replace(".pre_norm_mha.0.", ".layernorm_before." )
if ".pre_norm_mha.1.out_proj." in name:
__UpperCAmelCase : Tuple = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense." )
if ".pre_norm_ffn.0." in name:
__UpperCAmelCase : Optional[Any] = name.replace(".pre_norm_ffn.0.", ".layernorm_after." )
if ".pre_norm_ffn.1." in name:
__UpperCAmelCase : Dict = name.replace(".pre_norm_ffn.1.", ".intermediate.dense." )
if ".pre_norm_ffn.4." in name:
__UpperCAmelCase : int = name.replace(".pre_norm_ffn.4.", ".output.dense." )
if ".transformer." in name:
__UpperCAmelCase : Tuple = name.replace(".transformer.", ".transformer.layer." )
if ".aspp_layer." in name:
__UpperCAmelCase : Any = name.replace(".aspp_layer.", "." )
if ".aspp_pool." in name:
__UpperCAmelCase : Optional[Any] = name.replace(".aspp_pool.", "." )
if "seg_head." in name:
__UpperCAmelCase : Optional[int] = name.replace("seg_head.", "segmentation_head." )
if "segmentation_head.classifier.classifier." in name:
__UpperCAmelCase : str = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier." )
if "classifier.fc." in name:
__UpperCAmelCase : Optional[Any] = name.replace("classifier.fc.", "classifier." )
elif (not base_model) and ("segmentation_head." not in name):
__UpperCAmelCase : List[str] = "mobilevit." + name
return name
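# Hand-traced example of the renaming above (illustrative, not exhaustive):
# "conv_1.conv.weight" -> "conv_stem.conv.weight" -> "conv_stem.convolution.weight"
# -> "mobilevit.conv_stem.convolution.weight" (the "mobilevit." prefix is added
# because the key is neither a classifier nor a segmentation-head weight).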
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=False ) -> Union[str, Any]:
if base_model:
__UpperCAmelCase : Optional[int] = ""
else:
__UpperCAmelCase : Tuple = "mobilevit."
for key in orig_state_dict.copy().keys():
__UpperCAmelCase : Optional[int] = orig_state_dict.pop(snake_case__ )
if key[:8] == "encoder.":
__UpperCAmelCase : str = key[8:]
if "qkv" in key:
__UpperCAmelCase : Tuple = key.split("." )
__UpperCAmelCase : List[Any] = int(key_split[0][6:] ) - 1
__UpperCAmelCase : Optional[Any] = int(key_split[3] )
__UpperCAmelCase : Tuple = model.get_submodule(f'''{model_prefix}encoder.layer.{layer_num}''' )
__UpperCAmelCase : List[str] = layer.transformer.layer[transformer_num].attention.attention.all_head_size
__UpperCAmelCase : Optional[Any] = (
f'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'''
)
if "weight" in key:
__UpperCAmelCase : Any = val[:dim, :]
__UpperCAmelCase : Any = val[dim : dim * 2, :]
__UpperCAmelCase : List[Any] = val[-dim:, :]
else:
__UpperCAmelCase : List[str] = val[:dim]
__UpperCAmelCase : Optional[Any] = val[dim : dim * 2]
__UpperCAmelCase : List[Any] = val[-dim:]
else:
__UpperCAmelCase : str = val
return orig_state_dict
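# Note on the qkv branch above: the original checkpoint stores query/key/value
# as one fused tensor, which is split into three equal chunks of `dim` rows
# (or `dim` entries for biases) and written to the separate HF projection keys.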
def _UpperCamelCase ( ) -> Any:
__UpperCAmelCase : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
__UpperCAmelCase : List[str] = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw )
return im
@torch.no_grad()
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__=False ) -> Optional[Any]:
__UpperCAmelCase : Tuple = get_mobilevit_config(snake_case__ )
# load original state_dict
__UpperCAmelCase : str = torch.load(snake_case__, map_location="cpu" )
# load 🤗 model
if mobilevit_name.startswith("deeplabv3_" ):
__UpperCAmelCase : Optional[int] = MobileViTForSemanticSegmentation(snake_case__ ).eval()
else:
__UpperCAmelCase : List[Any] = MobileViTForImageClassification(snake_case__ ).eval()
__UpperCAmelCase : Dict = convert_state_dict(snake_case__, snake_case__ )
model.load_state_dict(snake_case__ )
# Check outputs on an image, prepared by MobileViTImageProcessor
__UpperCAmelCase : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32 )
__UpperCAmelCase : Any = image_processor(images=prepare_img(), return_tensors="pt" )
__UpperCAmelCase : Dict = model(**snake_case__ )
__UpperCAmelCase : Tuple = outputs.logits
if mobilevit_name.startswith("deeplabv3_" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
__UpperCAmelCase : int = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
__UpperCAmelCase : Tuple = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
__UpperCAmelCase : Any = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
] )
else:
raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3, :3, :3], snake_case__, atol=1e-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
__UpperCAmelCase : str = torch.tensor([-0.9866, 0.2392, -1.1241] )
elif mobilevit_name == "mobilevit_xs":
__UpperCAmelCase : Tuple = torch.tensor([-2.4761, -0.9399, -1.9587] )
elif mobilevit_name == "mobilevit_xxs":
__UpperCAmelCase : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653] )
else:
raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3], snake_case__, atol=1e-4 )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(f'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case__ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(snake_case__ )
if push_to_hub:
__UpperCAmelCase : List[str] = {
"mobilevit_s": "mobilevit-small",
"mobilevit_xs": "mobilevit-x-small",
"mobilevit_xxs": "mobilevit-xx-small",
"deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
"deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
"deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
}
print("Pushing to the hub..." )
__UpperCAmelCase : int = model_mapping[mobilevit_name]
image_processor.push_to_hub(snake_case__, organization="apple" )
model.push_to_hub(snake_case__, organization="apple" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_snake_case = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 342 | 0 |
def _UpperCamelCase ( snake_case__ ) -> bool:
    if not isinstance(snake_case__, list ):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]" )
    if len(snake_case__ ) == 0:
        raise ValueError("Input list must be a non empty list" )
    if len(snake_case__ ) == 1:
        return True
    common_diff : Any = snake_case__[1] - snake_case__[0]
    for index in range(len(snake_case__ ) - 1 ):
        if snake_case__[index + 1] - snake_case__[index] != common_diff:
            return False
return True
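# Hedged examples for the check above: [2, 4, 6] -> True (constant difference
# 2); [1, 2, 4] -> False; a single-element series such as [5] -> True.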
def _UpperCamelCase ( snake_case__ ) -> float:
    if not isinstance(snake_case__, list ):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]" )
    if len(snake_case__ ) == 0:
        raise ValueError("Input list must be a non empty list" )
    answer : Any = 0
    for val in snake_case__:
        answer += val
    return answer / len(snake_case__ )
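# Hedged example for the mean above: [2, 4, 6] -> 4.0.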
if __name__ == "__main__":
import doctest
doctest.testmod()
| 350 | import math
_snake_case = 10
_snake_case = 7
_snake_case = BALLS_PER_COLOUR * NUM_COLOURS
def _UpperCamelCase ( snake_case__ = 20 ) -> str:
__UpperCAmelCase : Optional[Any] = math.comb(snake_case__, snake_case__ )
__UpperCAmelCase : List[Any] = math.comb(NUM_BALLS - BALLS_PER_COLOUR, snake_case__ )
__UpperCAmelCase : Dict = NUM_COLOURS * (1 - missing_colour / total)
return f'''{result:.9f}'''
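# Hedged derivation of the formula above (Project Euler 493): by linearity of
# expectation, E[#distinct colours] = NUM_COLOURS * P(a given colour appears)
# = 7 * (1 - C(60, 20) / C(70, 20)), where C(60, 20) / C(70, 20) is the chance
# that none of that colour's 10 balls are among the 20 drawn.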
if __name__ == "__main__":
print(solution(20))
| 342 | 0 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class _snake_case :
def __init__( self: List[Any] , __lowerCamelCase: List[Any] , ) -> List[Any]:
__UpperCAmelCase : int = parent
__UpperCAmelCase : Optional[int] = 13
__UpperCAmelCase : Tuple = 7
__UpperCAmelCase : List[str] = True
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : Optional[Any] = True
__UpperCAmelCase : Union[str, Any] = 99
__UpperCAmelCase : Any = 32
__UpperCAmelCase : Dict = 2
__UpperCAmelCase : Dict = 4
__UpperCAmelCase : List[str] = 37
__UpperCAmelCase : str = 'gelu'
__UpperCAmelCase : Dict = 0.1
__UpperCAmelCase : Optional[int] = 0.1
__UpperCAmelCase : Optional[Any] = 5_12
__UpperCAmelCase : List[Any] = 16
__UpperCAmelCase : Dict = 2
__UpperCAmelCase : Optional[int] = 0.02
__UpperCAmelCase : List[Any] = 3
__UpperCAmelCase : Dict = 4
__UpperCAmelCase : str = None
def _lowerCamelCase ( self: List[Any] ) -> int:
__UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : Optional[Any] = None
if self.use_input_mask:
__UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : Dict = None
__UpperCAmelCase : List[str] = None
__UpperCAmelCase : Any = None
if self.use_labels:
__UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase : int = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase : str = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self: Any ) -> List[Any]:
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Dict , __lowerCamelCase: Any , __lowerCamelCase: List[str] , __lowerCamelCase: Any , __lowerCamelCase: Dict , __lowerCamelCase: Any ) -> Any:
__UpperCAmelCase : Optional[int] = TFEsmModel(config=__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
__UpperCAmelCase : str = model(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = [input_ids, input_mask]
__UpperCAmelCase : int = model(__lowerCamelCase )
__UpperCAmelCase : Dict = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self: Dict , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[Any] , __lowerCamelCase: Any , __lowerCamelCase: Tuple , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any , ) -> List[Any]:
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : Union[str, Any] = TFEsmModel(config=__lowerCamelCase )
__UpperCAmelCase : Tuple = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
__UpperCAmelCase : Tuple = model(__lowerCamelCase )
__UpperCAmelCase : Tuple = [input_ids, input_mask]
__UpperCAmelCase : List[Any] = model(__lowerCamelCase , encoder_hidden_states=__lowerCamelCase )
# Also check the case where encoder outputs are not passed
__UpperCAmelCase : Dict = model(__lowerCamelCase , attention_mask=__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[int] , __lowerCamelCase: int , __lowerCamelCase: int ) -> Optional[int]:
__UpperCAmelCase : Any = TFEsmForMaskedLM(config=__lowerCamelCase )
__UpperCAmelCase : List[Any] = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self: Any , __lowerCamelCase: Dict , __lowerCamelCase: Dict , __lowerCamelCase: List[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Any , __lowerCamelCase: List[str] ) -> Optional[Any]:
__UpperCAmelCase : Tuple = self.num_labels
__UpperCAmelCase : int = TFEsmForTokenClassification(config=__lowerCamelCase )
__UpperCAmelCase : Dict = {'input_ids': input_ids, 'attention_mask': input_mask}
__UpperCAmelCase : int = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self: List[str] ) -> Any:
__UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
__UpperCAmelCase : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class _snake_case ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
lowerCamelCase__: Dict = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCamelCase__: Any = (
{
"feature-extraction": TFEsmModel,
"fill-mask": TFEsmForMaskedLM,
"text-classification": TFEsmForSequenceClassification,
"token-classification": TFEsmForTokenClassification,
"zero-shot": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase__: Tuple = False
lowerCamelCase__: Tuple = False
def _lowerCamelCase ( self: Optional[int] ) -> int:
__UpperCAmelCase : Dict = TFEsmModelTester(self )
__UpperCAmelCase : Any = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 )
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self: str ) -> int:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowerCamelCase ( self: List[Any] ) -> List[str]:
__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__lowerCamelCase )
def _lowerCamelCase ( self: Dict ) -> int:
__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase )
def _lowerCamelCase ( self: Any ) -> str:
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCamelCase )
@slow
def _lowerCamelCase ( self: Optional[Any] ) -> Dict:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : str = TFEsmModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@unittest.skip("Protein models do not support embedding resizing." )
def _lowerCamelCase ( self: Union[str, Any] ) -> str:
pass
@unittest.skip("Protein models do not support embedding resizing." )
def _lowerCamelCase ( self: List[Any] ) -> Any:
pass
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Union[str, Any] = model_class(__lowerCamelCase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
__UpperCAmelCase : Optional[int] = model.get_bias()
assert isinstance(__lowerCamelCase , __lowerCamelCase )
for k, v in name.items():
assert isinstance(__lowerCamelCase , tf.Variable )
else:
__UpperCAmelCase : Optional[Any] = model.get_output_embeddings()
assert x is None
__UpperCAmelCase : Dict = model.get_bias()
assert name is None
@require_tf
class _snake_case ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self: str ) -> Dict:
__UpperCAmelCase : Optional[Any] = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
__UpperCAmelCase : List[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCAmelCase : Dict = model(__lowerCamelCase )[0]
__UpperCAmelCase : List[str] = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , __lowerCamelCase )
# compare the actual values for a slice.
__UpperCAmelCase : Dict = tf.constant(
[
[
[8.92_15_18, -10.58_98_14, -6.4_67_13_07],
[-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15],
[-7.78_12_47, -13.95_15_57, -3.74_05_92],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
__UpperCAmelCase : str = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
__UpperCAmelCase : List[str] = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
__UpperCAmelCase : Optional[int] = model(__lowerCamelCase )[0]
# compare the actual values for a slice.
__UpperCAmelCase : str = tf.constant(
[
[
[0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39],
[0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22],
[0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 351 | def _UpperCamelCase ( snake_case__ ) -> int:
__UpperCAmelCase : int = [0] * len(snake_case__ )
__UpperCAmelCase : Union[str, Any] = []
__UpperCAmelCase : str = [1] * len(snake_case__ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(snake_case__ ) ):
if indegree[i] == 0:
queue.append(snake_case__ )
while queue:
__UpperCAmelCase : List[str] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
__UpperCAmelCase : str = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(snake_case__ )
print(max(snake_case__ ) )
# Adjacency list of Graph
_snake_case = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
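# Hedged expected output for the DAG above: the longest chain is
# 0 -> 2 -> 5 -> 6 -> 7, and since distances start at 1 per vertex, the call
# prints 5.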
| 342 | 0 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_snake_case = 0
_snake_case = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_snake_case = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_snake_case = tuple[int, int]
class _snake_case :
def __init__( self: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: int , __lowerCamelCase: List[Any] , __lowerCamelCase: List[str] , __lowerCamelCase: Dict , ) -> Any:
__UpperCAmelCase : Union[str, Any] = pos_x
__UpperCAmelCase : Union[str, Any] = pos_y
__UpperCAmelCase : Tuple = (pos_y, pos_x)
__UpperCAmelCase : Union[str, Any] = goal_x
__UpperCAmelCase : str = goal_y
__UpperCAmelCase : List[Any] = g_cost
__UpperCAmelCase : List[Any] = parent
__UpperCAmelCase : Union[str, Any] = self.calculate_heuristic()
__UpperCAmelCase : List[str] = self.g_cost + self.h_cost
def _lowerCamelCase ( self: Any ) -> Union[str, Any]:
__UpperCAmelCase : Optional[int] = self.pos_x - self.goal_x
__UpperCAmelCase : Optional[int] = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(__SCREAMING_SNAKE_CASE ) + abs(__SCREAMING_SNAKE_CASE )
else:
return sqrt(dy**2 + dx**2 )
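    # Hedged note: with HEURISTIC == 1 this returns the Manhattan distance
    # |dx| + |dy|, otherwise the Euclidean distance; e.g. from (0, 0) to the
    # goal (6, 6) that is 12 versus sqrt(72) ~= 8.49.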
def __lt__( self: str , __lowerCamelCase: List[str] ) -> List[str]:
return self.f_cost < other.f_cost
class _snake_case :
def __init__( self: Optional[int] , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[Any] ) -> Dict:
__UpperCAmelCase : Optional[int] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase : Any = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase : Tuple = [self.start]
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : Dict = False
def _lowerCamelCase ( self: str ) -> Optional[int]:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__UpperCAmelCase : Union[str, Any] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(__SCREAMING_SNAKE_CASE )
self.closed_nodes.append(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase : Union[str, Any] = self.get_successors(__SCREAMING_SNAKE_CASE )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(__SCREAMING_SNAKE_CASE )
else:
# retrieve the best current path
__UpperCAmelCase : Tuple = self.open_nodes.pop(self.open_nodes.index(__SCREAMING_SNAKE_CASE ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(__SCREAMING_SNAKE_CASE )
else:
self.open_nodes.append(__SCREAMING_SNAKE_CASE )
return [self.start.pos]
def _lowerCamelCase ( self: Dict , __lowerCamelCase: Any ) -> List[Any]:
__UpperCAmelCase : int = []
for action in delta:
__UpperCAmelCase : Union[str, Any] = parent.pos_x + action[1]
__UpperCAmelCase : int = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__SCREAMING_SNAKE_CASE ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , __SCREAMING_SNAKE_CASE , ) )
return successors
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: int ) -> Union[str, Any]:
__UpperCAmelCase : Union[str, Any] = node
__UpperCAmelCase : Optional[int] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__UpperCAmelCase : int = current_node.parent
path.reverse()
return path
class _snake_case :
def __init__( self: Tuple , __lowerCamelCase: Dict , __lowerCamelCase: int ) -> List[str]:
__UpperCAmelCase : int = AStar(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase : Dict = AStar(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase : Tuple = False
def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
__UpperCAmelCase : Optional[Any] = self.fwd_astar.open_nodes.pop(0 )
__UpperCAmelCase : str = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.fwd_astar.closed_nodes.append(__SCREAMING_SNAKE_CASE )
self.bwd_astar.closed_nodes.append(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase : str = current_bwd_node
__UpperCAmelCase : Dict = current_fwd_node
__UpperCAmelCase : Optional[Any] = {
self.fwd_astar: self.fwd_astar.get_successors(__SCREAMING_SNAKE_CASE ),
self.bwd_astar: self.bwd_astar.get_successors(__SCREAMING_SNAKE_CASE ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(__SCREAMING_SNAKE_CASE )
else:
# retrieve the best current path
__UpperCAmelCase : Union[str, Any] = astar.open_nodes.pop(
astar.open_nodes.index(__SCREAMING_SNAKE_CASE ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(__SCREAMING_SNAKE_CASE )
else:
astar.open_nodes.append(__SCREAMING_SNAKE_CASE )
return [self.fwd_astar.start.pos]
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: List[str] , __lowerCamelCase: Dict ) -> List[str]:
__UpperCAmelCase : List[str] = self.fwd_astar.retrace_path(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase : Optional[Any] = self.bwd_astar.retrace_path(__SCREAMING_SNAKE_CASE )
bwd_path.pop()
bwd_path.reverse()
__UpperCAmelCase : Optional[int] = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_snake_case = (0, 0)
_snake_case = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_snake_case = time.time()
_snake_case = AStar(init, goal)
_snake_case = a_star.search()
_snake_case = time.time() - start_time
print(F'AStar execution time = {end_time:f} seconds')
_snake_case = time.time()
    _snake_case = BidirectionalAStar(init, goal)
    _snake_case = bd_a_star.search()
    _snake_case = time.time() - bd_start_time
print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
| 352 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
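# Hedged note on the pattern above: _LazyModule defers the heavy torch / TF /
# Flax imports until an attribute such as WhisperModel is first accessed, so
# importing the package stays cheap when only the config or tokenizer is used.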
| 342 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_snake_case = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
_snake_case = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
_snake_case = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class _snake_case ( snake_case_ ):
lowerCamelCase__: Dict = VOCAB_FILES_NAMES
lowerCamelCase__: str = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__: str = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase__: List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__: int = RealmTokenizer
def __init__( self: Any , __lowerCamelCase: Any=None , __lowerCamelCase: str=None , __lowerCamelCase: Tuple=True , __lowerCamelCase: Tuple="[UNK]" , __lowerCamelCase: str="[SEP]" , __lowerCamelCase: Any="[PAD]" , __lowerCamelCase: Union[str, Any]="[CLS]" , __lowerCamelCase: str="[MASK]" , __lowerCamelCase: int=True , __lowerCamelCase: Dict=None , **__lowerCamelCase: List[str] , ) -> Optional[int]:
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , do_lower_case=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , tokenize_chinese_chars=__lowerCamelCase , strip_accents=__lowerCamelCase , **__lowerCamelCase , )
__UpperCAmelCase : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , __lowerCamelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , __lowerCamelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , __lowerCamelCase ) != tokenize_chinese_chars
):
__UpperCAmelCase : Union[str, Any] = getattr(__lowerCamelCase , normalizer_state.pop("type" ) )
__UpperCAmelCase : Optional[Any] = do_lower_case
__UpperCAmelCase : Union[str, Any] = strip_accents
__UpperCAmelCase : str = tokenize_chinese_chars
__UpperCAmelCase : Union[str, Any] = normalizer_class(**__lowerCamelCase )
__UpperCAmelCase : int = do_lower_case
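    # Hedged note: the block above re-instantiates the backend normalizer
    # whenever do_lower_case, strip_accents or tokenize_chinese_chars differ
    # from the values serialized with the tokenizer file.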
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Union[str, Any] , **__lowerCamelCase: Dict ) -> Optional[Any]:
__UpperCAmelCase : List[str] = PaddingStrategy.MAX_LENGTH
__UpperCAmelCase : Tuple = text
__UpperCAmelCase : Optional[int] = kwargs.pop("text_pair" , __lowerCamelCase )
__UpperCAmelCase : List[Any] = kwargs.pop("return_tensors" , __lowerCamelCase )
__UpperCAmelCase : List[str] = {
"input_ids": [],
"attention_mask": [],
"token_type_ids": [],
}
for idx, candidate_text in enumerate(__lowerCamelCase ):
if batch_text_pair is not None:
__UpperCAmelCase : List[str] = batch_text_pair[idx]
else:
__UpperCAmelCase : Dict = None
__UpperCAmelCase : List[Any] = super().__call__(__lowerCamelCase , __lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = encoded_candidates.get("input_ids" )
__UpperCAmelCase : Tuple = encoded_candidates.get("attention_mask" )
__UpperCAmelCase : str = encoded_candidates.get("token_type_ids" )
if encoded_input_ids is not None:
output_data["input_ids"].append(__lowerCamelCase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(__lowerCamelCase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(__lowerCamelCase )
__UpperCAmelCase : List[str] = {key: item for key, item in output_data.items() if len(__lowerCamelCase ) != 0}
return BatchEncoding(__lowerCamelCase , tensor_type=__lowerCamelCase )
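    # Hedged usage sketch for the method above: called with a batch of
    # candidate lists (and a return_tensors value), each candidate is padded to
    # MAX_LENGTH and the per-example encodings are stacked, so "input_ids"
    # comes back shaped (batch_size, num_candidates, max_length).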
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: Any=None ) -> int:
__UpperCAmelCase : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Dict = None ) -> List[int]:
__UpperCAmelCase : Optional[int] = [self.sep_token_id]
__UpperCAmelCase : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCamelCase ( self: int , __lowerCamelCase: Dict , __lowerCamelCase: Optional[int] = None ) -> Tuple[str]:
__UpperCAmelCase : List[str] = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
return tuple(__lowerCamelCase )
| 353 | from __future__ import annotations
from math import pi
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 342 | 0 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_snake_case = get_tests_dir('''fixtures''')
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: Union[str, Any] ) -> List[str]:
__UpperCAmelCase : List[str] = mock.Mock()
__UpperCAmelCase : List[str] = 5_00
__UpperCAmelCase : List[str] = {}
__UpperCAmelCase : Dict = HTTPError
__UpperCAmelCase : str = {}
# Download this model to make sure it's in the cache.
__UpperCAmelCase : List[str] = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=_lowerCAmelCase ) as mock_head:
__UpperCAmelCase : Tuple = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
            # This checks that we did call the fake head request
mock_head.assert_called()
def _lowerCamelCase ( self: List[Any] ) -> Union[str, Any]:
__UpperCAmelCase : str = WavaVecaFeatureExtractor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class _snake_case ( unittest.TestCase ):
@classmethod
def _lowerCamelCase ( cls: Union[str, Any] ) -> Dict:
__UpperCAmelCase : Optional[int] = TOKEN
HfFolder.save_token(_lowerCAmelCase )
@classmethod
def _lowerCamelCase ( cls: str ) -> Dict:
try:
delete_repo(token=cls._token , repo_id="test-feature-extractor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" )
except HTTPError:
pass
def _lowerCamelCase ( self: Tuple ) -> str:
__UpperCAmelCase : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(_lowerCAmelCase )
feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token )
__UpperCAmelCase : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(f'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCAmelCase , getattr(_lowerCAmelCase , _lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_lowerCAmelCase , repo_id="test-feature-extractor" , push_to_hub=_lowerCAmelCase , use_auth_token=self._token )
__UpperCAmelCase : List[str] = WavaVecaFeatureExtractor.from_pretrained(f'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCAmelCase , getattr(_lowerCAmelCase , _lowerCAmelCase ) )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[Any]:
__UpperCAmelCase : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(_lowerCAmelCase )
feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token )
__UpperCAmelCase : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCAmelCase , getattr(_lowerCAmelCase , _lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_lowerCAmelCase , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=_lowerCAmelCase , use_auth_token=self._token )
__UpperCAmelCase : Optional[int] = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCAmelCase , getattr(_lowerCAmelCase , _lowerCAmelCase ) )
def _lowerCamelCase ( self: Dict ) -> Any:
CustomFeatureExtractor.register_for_auto_class()
__UpperCAmelCase : Dict = CustomFeatureExtractor.from_pretrained(_lowerCAmelCase )
feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , )
__UpperCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained(
f'''{USER}/test-dynamic-feature-extractor''' , trust_remote_code=_lowerCAmelCase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
| 354 | import flax.linen as nn
import jax
import jax.numpy as jnp
class _snake_case ( nn.Module ):
lowerCamelCase__: int
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: Tuple ) -> Union[str, Any]:
__UpperCAmelCase : List[str] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Optional[Any] , __lowerCamelCase: Optional[int] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = hidden_states.shape
__UpperCAmelCase : Dict = jax.image.resize(
__lowerCamelCase , shape=(batch, height * 2, width * 2, channels) , method="nearest" , )
__UpperCAmelCase : Dict = self.conv(__lowerCamelCase )
return hidden_states
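# Hedged shape note for the upsampler above: jax.image.resize doubles both
# spatial dimensions with nearest-neighbour interpolation, so e.g. a
# (1, 8, 8, C) feature map becomes (1, 16, 16, C) before the 3x3 convolution.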
class _snake_case ( nn.Module ):
lowerCamelCase__: int
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: str ) -> Any:
__UpperCAmelCase : Optional[int] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Dict , __lowerCamelCase: str ) -> List[Any]:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
__UpperCAmelCase : Any = self.conv(__lowerCamelCase )
return hidden_states
class _snake_case ( nn.Module ):
lowerCamelCase__: int
lowerCamelCase__: int = None
lowerCamelCase__: float = 0.0
lowerCamelCase__: bool = None
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: str ) -> List[str]:
__UpperCAmelCase : str = self.in_channels if self.out_channels is None else self.out_channels
__UpperCAmelCase : Dict = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
__UpperCAmelCase : List[str] = nn.Conv(
__lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__UpperCAmelCase : Optional[Any] = nn.Dense(__lowerCamelCase , dtype=self.dtype )
__UpperCAmelCase : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
__UpperCAmelCase : Optional[Any] = nn.Dropout(self.dropout_prob )
__UpperCAmelCase : Tuple = nn.Conv(
__lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__UpperCAmelCase : Optional[int] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
__UpperCAmelCase : List[Any] = None
if use_nin_shortcut:
__UpperCAmelCase : Dict = nn.Conv(
__lowerCamelCase , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , )
def __call__( self: Tuple , __lowerCamelCase: Tuple , __lowerCamelCase: str , __lowerCamelCase: Union[str, Any]=True ) -> List[Any]:
__UpperCAmelCase : Dict = hidden_states
__UpperCAmelCase : int = self.norma(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = nn.swish(__lowerCamelCase )
__UpperCAmelCase : Tuple = self.conva(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.time_emb_proj(nn.swish(__lowerCamelCase ) )
__UpperCAmelCase : List[str] = jnp.expand_dims(jnp.expand_dims(__lowerCamelCase , 1 ) , 1 )
__UpperCAmelCase : List[str] = hidden_states + temb
__UpperCAmelCase : Union[str, Any] = self.norma(__lowerCamelCase )
__UpperCAmelCase : Tuple = nn.swish(__lowerCamelCase )
__UpperCAmelCase : str = self.dropout(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : List[str] = self.conva(__lowerCamelCase )
if self.conv_shortcut is not None:
__UpperCAmelCase : Optional[int] = self.conv_shortcut(__lowerCamelCase )
return hidden_states + residual
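# Hedged flow summary for the block above: norm -> swish -> conv, add the
# projected time embedding, then norm -> swish -> dropout -> conv, and finally
# add the (optionally 1x1-projected) residual.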
| 342 | 0 |
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def _UpperCamelCase ( snake_case__ ) -> int:
random.seed(_A )
np.random.seed(_A )
torch.manual_seed(_A )
torch.cuda.manual_seed_all(_A )
# ^^ safe to call this function even if cuda is not available
class _snake_case :
def __init__( self: List[str] , __lowerCamelCase: List[Any] , __lowerCamelCase: Tuple = 0.99_99 , __lowerCamelCase: Any = 0.0 , __lowerCamelCase: List[str] = 0 , __lowerCamelCase: Tuple = False , __lowerCamelCase: Optional[Any] = 1.0 , __lowerCamelCase: Any = 2 / 3 , __lowerCamelCase: List[Any] = None , __lowerCamelCase: Tuple = None , **__lowerCamelCase: List[Any] , ) -> List[Any]:
if isinstance(_SCREAMING_SNAKE_CASE , torch.nn.Module ):
__UpperCAmelCase : Tuple = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`" , "1.0.0" , _SCREAMING_SNAKE_CASE , standard_warn=_SCREAMING_SNAKE_CASE , )
__UpperCAmelCase : Tuple = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
__UpperCAmelCase : str = True
if kwargs.get("max_value" , _SCREAMING_SNAKE_CASE ) is not None:
__UpperCAmelCase : Dict = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value" , "1.0.0" , _SCREAMING_SNAKE_CASE , standard_warn=_SCREAMING_SNAKE_CASE )
__UpperCAmelCase : Union[str, Any] = kwargs["max_value"]
if kwargs.get("min_value" , _SCREAMING_SNAKE_CASE ) is not None:
__UpperCAmelCase : str = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value" , "1.0.0" , _SCREAMING_SNAKE_CASE , standard_warn=_SCREAMING_SNAKE_CASE )
__UpperCAmelCase : Optional[int] = kwargs["min_value"]
__UpperCAmelCase : str = list(_SCREAMING_SNAKE_CASE )
__UpperCAmelCase : Optional[int] = [p.clone().detach() for p in parameters]
if kwargs.get("device" , _SCREAMING_SNAKE_CASE ) is not None:
__UpperCAmelCase : Any = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device" , "1.0.0" , _SCREAMING_SNAKE_CASE , standard_warn=_SCREAMING_SNAKE_CASE )
self.to(device=kwargs["device"] )
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : str = decay
__UpperCAmelCase : Optional[Any] = min_decay
__UpperCAmelCase : Union[str, Any] = update_after_step
__UpperCAmelCase : str = use_ema_warmup
__UpperCAmelCase : str = inv_gamma
__UpperCAmelCase : str = power
__UpperCAmelCase : Tuple = 0
__UpperCAmelCase : Any = None # set in `step()`
__UpperCAmelCase : Any = model_cls
__UpperCAmelCase : str = model_config
@classmethod
def _lowerCamelCase ( cls: Union[str, Any] , __lowerCamelCase: Dict , __lowerCamelCase: int ) -> "EMAModel":
__UpperCAmelCase , __UpperCAmelCase : Dict = model_cls.load_config(_SCREAMING_SNAKE_CASE , return_unused_kwargs=_SCREAMING_SNAKE_CASE )
__UpperCAmelCase : List[str] = model_cls.from_pretrained(_SCREAMING_SNAKE_CASE )
__UpperCAmelCase : Any = cls(model.parameters() , model_cls=_SCREAMING_SNAKE_CASE , model_config=model.config )
ema_model.load_state_dict(_SCREAMING_SNAKE_CASE )
return ema_model
def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: int ) -> List[Any]:
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
__UpperCAmelCase : Any = self.model_cls.from_config(self.model_config )
__UpperCAmelCase : str = self.state_dict()
state_dict.pop("shadow_params" , _SCREAMING_SNAKE_CASE )
model.register_to_config(**_SCREAMING_SNAKE_CASE )
self.copy_to(model.parameters() )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
def _lowerCamelCase ( self: Any , __lowerCamelCase: Optional[int] ) -> float:
__UpperCAmelCase : Union[str, Any] = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
__UpperCAmelCase : Dict = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
__UpperCAmelCase : Dict = (1 + step) / (10 + step)
__UpperCAmelCase : Optional[Any] = min(_SCREAMING_SNAKE_CASE , self.decay )
# make sure decay is not smaller than min_decay
__UpperCAmelCase : Optional[Any] = max(_SCREAMING_SNAKE_CASE , self.min_decay )
return cur_decay_value
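    # Hedged numeric examples for the schedule above (defaults, decay=0.9999):
    # without warmup the ramp (1 + step) / (10 + step) gives ~0.526 after 10
    # updates and ~0.9991 after 10000, capped at `decay`; with warmup and
    # power = 2 / 3, step 10 gives 1 - 11 ** (-2 / 3) ~= 0.80.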
@torch.no_grad()
def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: Any ) -> List[Any]:
if isinstance(_SCREAMING_SNAKE_CASE , torch.nn.Module ):
__UpperCAmelCase : str = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`" , "1.0.0" , _SCREAMING_SNAKE_CASE , standard_warn=_SCREAMING_SNAKE_CASE , )
__UpperCAmelCase : Union[str, Any] = parameters.parameters()
__UpperCAmelCase : str = list(_SCREAMING_SNAKE_CASE )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
__UpperCAmelCase : int = self.get_decay(self.optimization_step )
__UpperCAmelCase : Any = decay
__UpperCAmelCase : Union[str, Any] = 1 - decay
__UpperCAmelCase : Optional[Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , _SCREAMING_SNAKE_CASE ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
__UpperCAmelCase : Dict = deepspeed.zero.GatheredParameters(_SCREAMING_SNAKE_CASE , modifier_rank=_SCREAMING_SNAKE_CASE )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(_SCREAMING_SNAKE_CASE )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Optional[int] ) -> None:
__UpperCAmelCase : str = list(_SCREAMING_SNAKE_CASE )
for s_param, param in zip(self.shadow_params , _SCREAMING_SNAKE_CASE ):
param.data.copy_(s_param.to(param.device ).data )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Optional[Any]=None , __lowerCamelCase: Union[str, Any]=None ) -> None:
__UpperCAmelCase : List[Any] = [
p.to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ) if p.is_floating_point() else p.to(device=_SCREAMING_SNAKE_CASE )
for p in self.shadow_params
]
def _lowerCamelCase ( self: List[str] ) -> dict:
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
    def _lowerCamelCase ( self: Optional[int] , parameters: Tuple ) -> None:
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]
    def _lowerCamelCase ( self: Tuple , parameters: Dict ) -> None:
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
        for c_param, param in zip(self.temp_stored_params , parameters ):
            param.data.copy_(c_param.data )
        # Better memory-wise.
        self.temp_stored_params = None
    def _lowerCamelCase ( self: List[Any] , state_dict: dict ) -> None:
        state_dict = copy.deepcopy(state_dict )
        self.decay = state_dict.get("decay" , self.decay )
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1" )
        self.min_decay = state_dict.get("min_decay" , self.min_decay )
        if not isinstance(self.min_decay , float ):
            raise ValueError("Invalid min_decay" )
        self.optimization_step = state_dict.get("optimization_step" , self.optimization_step )
        if not isinstance(self.optimization_step , int ):
            raise ValueError("Invalid optimization_step" )
        self.update_after_step = state_dict.get("update_after_step" , self.update_after_step )
        if not isinstance(self.update_after_step , int ):
            raise ValueError("Invalid update_after_step" )
        self.use_ema_warmup = state_dict.get("use_ema_warmup" , self.use_ema_warmup )
        if not isinstance(self.use_ema_warmup , bool ):
            raise ValueError("Invalid use_ema_warmup" )
        self.inv_gamma = state_dict.get("inv_gamma" , self.inv_gamma )
        if not isinstance(self.inv_gamma , (float, int) ):
            raise ValueError("Invalid inv_gamma" )
        self.power = state_dict.get("power" , self.power )
        if not isinstance(self.power , (float, int) ):
            raise ValueError("Invalid power" )
        shadow_params = state_dict.get("shadow_params" , None )
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params , list ):
                raise ValueError("shadow_params must be a list" )
            if not all(isinstance(p , torch.Tensor ) for p in self.shadow_params ):
                raise ValueError("shadow_params must all be Tensors" )
| 355 | import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_snake_case = pytest.mark.integration
@require_faiss
class _snake_case ( TestCase ):
    def _create_dummy_dataset( self: Union[str, Any] ) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x ) for x in np.arange(30 ).tolist()]} )
        return dset
    def test_add_faiss_index( self: Optional[Any] ) -> None:
        import faiss
        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex , i : {"vecs": i * np.ones(5 , dtype=np.float32 )} , with_indices=True , keep_in_memory=True )
        dset = dset.add_faiss_index("vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT )
        scores, examples = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def _lowerCamelCase ( self: List[str] ) -> int:
import faiss
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
        scores, examples = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples["filename"][0] , "my_name-train_29" )
    def test_serialization( self: Optional[int] ) -> None:
        import faiss
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
            dset.save_faiss_index("vecs" , tmp_file.name )
            dset.load_faiss_index("vecs2" , tmp_file.name )
        os.unlink(tmp_file.name )
        scores, examples = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.float32 ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
        dset.drop_index("vecs" )
        self.assertRaises(MissingIndex , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.float32 ) ) )
    def test_add_elasticsearch_index( self: List[str] ) -> None:
        from elasticsearch import Elasticsearch
        dset: Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value = [(True, None)] * 30
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index("filename" , es_client=es_client )
            scores, examples = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class _snake_case ( TestCase ):
    def test_flat_ip( self: List[str] ) -> None:
        import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        # add vectors
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsNotNone(index.faiss_index )
        self.assertEqual(index.faiss_index.ntotal , 5 )
        index.add_vectors(np.zeros((5, 5) , dtype=np.float32 ) )
        self.assertEqual(index.faiss_index.ntotal , 10 )
        # single query
        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores, indices = index.search(query )
        self.assertRaises(ValueError , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
        queries = np.eye(5 , dtype=np.float32 )[::-1]
        total_scores, total_indices = index.search_batch(queries )
        self.assertRaises(ValueError , index.search_batch , queries[0] )
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores ) , 0 )
        self.assertListEqual([4, 3, 2, 1, 0] , best_indices )
def _lowerCamelCase ( self: Any ) -> List[str]:
import faiss
__UpperCAmelCase : Dict = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
__UpperCAmelCase : Optional[Any] = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__lowerCamelCase ):
__UpperCAmelCase : Any = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def _lowerCamelCase ( self: List[str] ) -> Dict:
import faiss
__UpperCAmelCase : str = faiss.IndexFlat(5 )
__UpperCAmelCase : int = FaissIndex(custom_index=__lowerCamelCase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def _lowerCamelCase ( self: Union[str, Any] ) -> int:
import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
            index.save(tmp_file.name )
            index = FaissIndex.load(tmp_file.name )
        os.unlink(tmp_file.name )
        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores, indices = index.search(query )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs( mockfs ) -> None:
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5, dtype=np.float32 ) )
    index_name = "index.faiss"
    path = f'''mock://{index_name}'''
    index.save(path, storage_options=mockfs.storage_options )
    index = FaissIndex.load(path, storage_options=mockfs.storage_options )
    query = np.zeros(5, dtype=np.float32 )
    query[1] = 1
    scores, indices = index.search(query )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class _snake_case ( TestCase ):
    def test_elasticsearch( self: str ) -> None:
        from elasticsearch import Elasticsearch
        with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client )
            mocked_bulk.return_value = [(True, None)] * 3
index.add_documents(["foo", "bar", "foobar"] )
# single query
__UpperCAmelCase : Dict = "foo"
__UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = index.search(__lowerCamelCase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
__UpperCAmelCase : int = "foo"
__UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search(__lowerCamelCase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
__UpperCAmelCase : int = ["foo", "bar", "foobar"]
__UpperCAmelCase : Union[str, Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
__UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search_batch(__lowerCamelCase )
__UpperCAmelCase : Tuple = [scores[0] for scores in total_scores]
__UpperCAmelCase : Optional[int] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __lowerCamelCase )
# batched queries with timeout
__UpperCAmelCase : str = ["foo", "bar", "foobar"]
__UpperCAmelCase : Tuple = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search_batch(__lowerCamelCase , request_timeout=30 )
__UpperCAmelCase : Union[str, Any] = [scores[0] for scores in total_scores]
__UpperCAmelCase : List[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __lowerCamelCase )
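# Added sketch: the minimal FaissIndex round trip these tests exercise, outside of
# unittest (assumes `datasets` with the faiss extra is installed):
#
#   import numpy as np
#   from datasets.search import FaissIndex
#   index = FaissIndex(string_factory="Flat")
#   index.add_vectors(np.eye(5, dtype=np.float32))
#   scores, ids = index.search(np.ones(5, dtype=np.float32))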
| 342 | 0 |
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
_snake_case = TypeVar('''T''')
_snake_case = Union[List[T], Tuple[T, ...]]
_snake_case = Union[T, List[T], Dict[str, T]]
_snake_case = Union[str, bytes, os.PathLike]
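# Added illustration: how these aliases typically appear in signatures. The function
# below is hypothetical, not part of the library:
#
#   def save_paths(paths: ListLike[PathLike]) -> NestedDataStructureLike[str]: ...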
| 356 | import argparse
import struct
import unittest
class SHA256:
    def __init__( self: Tuple , data: bytes ) -> None:
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6_A_0_9_E_6_6_7,
0xB_B_6_7_A_E_8_5,
0x3_C_6_E_F_3_7_2,
0xA_5_4_F_F_5_3_A,
0x5_1_0_E_5_2_7_F,
0x9_B_0_5_6_8_8_C,
0x1_F_8_3_D_9_A_B,
0x5_B_E_0_C_D_1_9,
]
        # Initialize round constants
        self.round_constants = [
0x4_2_8_A_2_F_9_8,
0x7_1_3_7_4_4_9_1,
0xB_5_C_0_F_B_C_F,
0xE_9_B_5_D_B_A_5,
0x3_9_5_6_C_2_5_B,
0x5_9_F_1_1_1_F_1,
0x9_2_3_F_8_2_A_4,
0xA_B_1_C_5_E_D_5,
0xD_8_0_7_A_A_9_8,
0x1_2_8_3_5_B_0_1,
0x2_4_3_1_8_5_B_E,
0x5_5_0_C_7_D_C_3,
0x7_2_B_E_5_D_7_4,
0x8_0_D_E_B_1_F_E,
0x9_B_D_C_0_6_A_7,
0xC_1_9_B_F_1_7_4,
0xE_4_9_B_6_9_C_1,
0xE_F_B_E_4_7_8_6,
0x0_F_C_1_9_D_C_6,
0x2_4_0_C_A_1_C_C,
0x2_D_E_9_2_C_6_F,
0x4_A_7_4_8_4_A_A,
0x5_C_B_0_A_9_D_C,
0x7_6_F_9_8_8_D_A,
0x9_8_3_E_5_1_5_2,
0xA_8_3_1_C_6_6_D,
0xB_0_0_3_2_7_C_8,
0xB_F_5_9_7_F_C_7,
0xC_6_E_0_0_B_F_3,
0xD_5_A_7_9_1_4_7,
0x0_6_C_A_6_3_5_1,
0x1_4_2_9_2_9_6_7,
0x2_7_B_7_0_A_8_5,
0x2_E_1_B_2_1_3_8,
0x4_D_2_C_6_D_F_C,
0x5_3_3_8_0_D_1_3,
0x6_5_0_A_7_3_5_4,
0x7_6_6_A_0_A_B_B,
0x8_1_C_2_C_9_2_E,
0x9_2_7_2_2_C_8_5,
0xA_2_B_F_E_8_A_1,
0xA_8_1_A_6_6_4_B,
0xC_2_4_B_8_B_7_0,
0xC_7_6_C_5_1_A_3,
0xD_1_9_2_E_8_1_9,
0xD_6_9_9_0_6_2_4,
0xF_4_0_E_3_5_8_5,
0x1_0_6_A_A_0_7_0,
0x1_9_A_4_C_1_1_6,
0x1_E_3_7_6_C_0_8,
0x2_7_4_8_7_7_4_C,
0x3_4_B_0_B_C_B_5,
0x3_9_1_C_0_C_B_3,
0x4_E_D_8_A_A_4_A,
0x5_B_9_C_C_A_4_F,
0x6_8_2_E_6_F_F_3,
0x7_4_8_F_8_2_E_E,
0x7_8_A_5_6_3_6_F,
0x8_4_C_8_7_8_1_4,
0x8_C_C_7_0_2_0_8,
0x9_0_B_E_F_F_F_A,
0xA_4_5_0_6_C_E_B,
0xB_E_F_9_A_3_F_7,
0xC_6_7_1_7_8_F_2,
]
        self.preprocessed_data = self.preprocessing(self.data )
self.final_hash()
@staticmethod
    def preprocessing( data: bytes ) -> bytes:
        padding = B"\x80" + (B"\x00" * (63 - (len(data ) + 8) % 64))
        big_endian_integer = struct.pack(">Q" , (len(data ) * 8) )
        return data + padding + big_endian_integer
    def final_hash( self: Dict ) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L" , block ) )
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0 , 64 ):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15] , 7 )
                        ^ self.ror(words[index - 15] , 18 )
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2] , 17 )
                        ^ self.ror(words[index - 2] , 19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_0_0_0_0_0_0_0_0
                # Compression
                s1 = self.ror(e , 6 ) ^ self.ror(e , 11 ) ^ self.ror(e , 25 )
                ch = (e & f) ^ ((~e & 0xF_F_F_F_F_F_F_F) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_0_0_0_0_0_0_0_0
                s0 = self.ror(a , 2 ) ^ self.ror(a , 13 ) ^ self.ror(a , 22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_0_0_0_0_0_0_0_0
                a, b, c, d, e, f, g, h = (
                    ((temp1 + temp2) % 0x1_0_0_0_0_0_0_0_0),
                    a,
                    b,
                    c,
                    ((d + temp1) % 0x1_0_0_0_0_0_0_0_0),
                    e,
                    f,
                    g,
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_0_0_0_0_0_0_0_0)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = "".join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror( self: List[str] , value: int , rotations: int ) -> int:
        return 0xF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations)
class _snake_case ( unittest.TestCase ):
    def test_match_hashes( self: List[Any] ) -> None:
        import hashlib
        data = bytes("Test String" , "utf-8" )
        self.assertEqual(SHA256(data ).hash , hashlib.sha256(data ).hexdigest() )
def main( ) -> None:
    import doctest
    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file" )
    args = parser.parse_args()
    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb" ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input, "utf-8" )
    print(SHA256(hash_input ).hash )
if __name__ == "__main__":
main()
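# Added usage sketch: hashing a byte string directly, without the CLI above.
#
#   digest = SHA256(b"The quick brown fox").hash
#   # `digest` is 64 hex characters and matches hashlib.sha256(...).hexdigest()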
| 342 | 0 |
import unittest
import numpy as np
def schur_complement( mat_a, mat_b, mat_c, pseudo_inv = None, ) -> np.ndarray:
    shape_a = np.shape(mat_a )
    shape_b = np.shape(mat_b )
    shape_c = np.shape(mat_c )
if shape_a[0] != shape_b[0]:
__UpperCAmelCase : Any = (
'''Expected the same number of rows for A and B. '''
f'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(snake_case__ )
if shape_b[1] != shape_c[1]:
__UpperCAmelCase : str = (
'''Expected the same number of columns for B and C. '''
f'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(snake_case__ )
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a )
except np.linalg.LinAlgError:
raise ValueError(
"Input matrix A is not invertible. Cannot compute Schur complement." )
return mat_c - mat_b.T @ a_inv @ mat_b
class _snake_case ( unittest.TestCase ):
    def test_schur_complement( self: Any ) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        s = schur_complement(a , b , c )
        input_matrix = np.block([[a, b], [b.T, c]] )
        det_x = np.linalg.det(input_matrix )
        det_a = np.linalg.det(a )
        det_s = np.linalg.det(s )
        self.assertAlmostEqual(det_x , det_a * det_s )
    def test_improper_a_b_dimensions( self: Dict ) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )
    def test_improper_b_c_dimensions( self: Union[str, Any] ) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1, 3], [6, 3, 5]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
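# Added illustration: the determinant identity det([[A, B], [B.T, C]]) == det(A) * det(S)
# that the first test above verifies, shown once on a small, well-conditioned example.
def _schur_demo() -> None:
    a = np.array([[2.0, 0.0], [0.0, 2.0]])
    b = np.array([[1.0], [0.0]])
    c = np.array([[3.0]])
    s = schur_complement(a, b, c)  # -> [[2.5]]
    m = np.block([[a, b], [b.T, c]])
    # det(m) = 10 and det(a) * det(s) = 4 * 2.5 = 10
    assert np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))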
| 357 | import numpy as np
import datasets
_snake_case = '''
Compute the Mahalanobis Distance
Mahalanobis distance is the distance between a point and a distribution,
not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
_snake_case = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
_snake_case = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalanobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
    def _info( self: List[str] ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
    def _compute( self: List[str] , X , reference_distribution ) -> dict:
        # convert to numpy arrays
        X = np.array(X )
        reference_distribution = np.array(reference_distribution )
        # Assert that arrays are 2D
        if len(X.shape ) != 2:
            raise ValueError("Expected `X` to be a 2D vector" )
        if len(reference_distribution.shape ) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector" )
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution )
        cov = np.cov(reference_distribution.T )
        try:
            inv_covmat = np.linalg.inv(cov )
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov )
        left_term = np.dot(X_minus_mu , inv_covmat )
        mahal_dist = np.dot(left_term , X_minus_mu.T ).diagonal()
        return {"mahalanobis": mahal_dist}
| 342 | 0 |
def floyd( n ) -> None:
    for i in range(0, n ):
        for _ in range(0, n - i - 1 ):  # printing spaces
            print(" ", end="" )
        for _ in range(0, i + 1 ):  # printing stars
            print("* ", end="" )
        print()
def reverse_floyd( n ) -> None:
    for i in range(n, 0, -1 ):
        for _ in range(i, 0, -1 ):  # printing stars
            print("* ", end="" )
        print()
        for _ in range(n - i + 1, 0, -1 ):  # printing spaces
            print(" ", end="" )
def pretty_print( n ) -> None:
    if n <= 0:
        print(" ... .... nothing printing :(" )
        return
    floyd(n )  # upper half
    reverse_floyd(n )  # lower half
if __name__ == "__main__":
print(r'''| /\ | |- | |- |--| |\ /| |-''')
print(r'''|/ \| |- |_ |_ |__| | \/ | |_''')
    K = 1
    while K:
        user_number = int(input('''enter the number and , and see the magic : '''))
        print()
        pretty_print(user_number)
        K = int(input('''press 0 to exit... and 1 to continue...'''))
print('''Good Bye...''')
| 358 | import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ) -> None:
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : Optional[int] = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[str] = use_attention_mask
__UpperCAmelCase : Dict = use_token_type_ids
__UpperCAmelCase : Optional[int] = use_labels
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : Dict = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : Tuple = intermediate_size
__UpperCAmelCase : Union[str, Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : str = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = max_position_embeddings
__UpperCAmelCase : Optional[int] = type_vocab_size
__UpperCAmelCase : str = type_sequence_label_size
__UpperCAmelCase : Tuple = initializer_range
__UpperCAmelCase : str = num_choices
    def prepare_config_and_inputs( self: Optional[Any] ) -> Tuple:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=True , )
        return config, input_ids, attention_mask
    def prepare_config_and_inputs_for_common( self: str ) -> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class _snake_case ( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self: List[Any] ) -> None:
        self.model_tester = FlaxDistilBertModelTester(self )
@slow
    def test_model_from_pretrained( self: Tuple ) -> None:
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased" )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class _snake_case ( unittest.TestCase ):
@slow
    def test_inference( self: int ) -> None:
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased" )
        input_ids = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = (1, 11, 7_68)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
| 342 | 0 |
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray( arr, low, high ) -> tuple[int | None, int | None, float]:
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid )
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high )
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high )
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum
def max_cross_sum( arr, low, mid, high ) -> tuple[int, int, float]:
    left_sum, max_left = float("-inf" ), -1
    right_sum, max_right = float("-inf" ), -1
    summ: int | float = 0
    for i in range(mid, low - 1, -1 ):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1, high + 1 ):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)
def time_max_subarray( input_size ) -> float:
    arr = [randint(1, input_size ) for _ in range(input_size )]
    start = time.time()
    max_subarray(arr, 0, input_size - 1 )
    end = time.time()
    return end - start
def _UpperCamelCase ( ) -> None:
    input_sizes = [10, 100, 1000, 1_0000, 5_0000, 10_0000, 20_0000, 30_0000, 40_0000, 50_0000]
    runtimes = [time_max_subarray(input_size ) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken" )
    for input_size, runtime in zip(input_sizes, runtimes ):
        print(input_size, "\t\t", runtime )
    plt.plot(input_sizes, runtimes )
    plt.xlabel("Number of Inputs" )
    plt.ylabel("Time taken in seconds" )
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
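# Added demo (local names only): the classic CLRS example array, whose maximum
# subarray is arr[7:11] with sum 43.
def _max_subarray_demo() -> None:
    arr = [13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12, -5, -22, 15, -4, 7]
    low, high, best = max_subarray(arr, 0, len(arr) - 1)
    print(low, high, best)  # expected: 7 10 43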
| 359 | import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
_snake_case = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
_snake_case = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
_snake_case = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
_snake_case = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
_snake_case = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def rename_state_dict_key( k, patterns ) -> Any:
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name )
    return k
def convert_bigbird_pegasus( tf_weights, config_update ) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update )
    torch_model = BigBirdPegasusForConditionalGeneration(cfg )
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder" )}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder" )}
    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion" ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns )
        if new_k not in state_dict:
            raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if any(True if i in k else False for i in ["dense", "query", "key", "value"] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion" ):
__UpperCAmelCase : int = [k.endswith(snake_case__ ) for ending in KEYS_TO_IGNORE]
if any(snake_case__ ):
continue
__UpperCAmelCase : Optional[Any] = REMAINING_PATTERNS
__UpperCAmelCase : Optional[int] = rename_state_dict_key(snake_case__, snake_case__ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
if any(True if i in k else False for i in ["dense", "query", "key", "value"] ):
__UpperCAmelCase : List[Any] = v.T
__UpperCAmelCase : List[str] = torch.from_numpy(snake_case__ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
__UpperCAmelCase : List[Any] = mapping["model.embed_positions.weight"]
__UpperCAmelCase : Optional[Any] = mapping.pop("model.embed_positions.weight" )
    missing, extra = torch_model.load_state_dict(mapping, strict=False )
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], f'''no matches found for the following tf keys {extra}'''
return torch_model
def get_tf_weights_as_numpy( path ) -> Dict:
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict" ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path, name )
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch( ckpt_path, save_dir, config_update ) -> None:
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    torch_model = convert_bigbird_pegasus(tf_weights, config_update )
    torch_model.save_pretrained(save_dir )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
_snake_case = parser.parse_args()
_snake_case = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
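# Example invocation (illustrative; both paths are placeholders, and the script
# filename is an assumption inferred from the converter's purpose):
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path /path/to/tf_ckpt --save_dir ./bigbird-pegasus-converted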
| 342 | 0 |
import string
from math import log10
def term_frequency( term, document ) -> int:
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation ) ).replace("\n", "" )
    tokenize_document = document_without_punctuation.split(" " )  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()] )
def document_frequency( term, corpus ) -> tuple[int, int]:
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation ) )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n" )
    term = term.lower()
    return (len([doc for doc in docs if term in doc] ), len(docs ))
def inverse_document_frequency( df, n, smoothing=False ) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined." )
        return round(1 + log10(n / (1 + df) ), 3 )
    if df == 0:
        raise ZeroDivisionError("df must be > 0" )
    elif n == 0:
        raise ValueError("log10(0) is undefined." )
    return round(log10(n / df ), 3 )
def tf_idf( tf, idf ) -> float:
    return round(tf * idf, 3 )
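# Added worked example (names local to this sketch): tf-idf for one term over a
# three-document corpus.
def _tf_idf_demo() -> None:
    corpus = "the cat sat\nthe dog sat\nthe cat ran"
    tf = term_frequency("cat", "the cat sat")   # -> 1
    df, n = document_frequency("cat", corpus)   # -> (2, 3)
    idf = inverse_document_frequency(df, n)     # -> round(log10(3 / 2), 3)
    print(tf_idf(tf, idf))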
| 360 | import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class _snake_case ( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__( self: List[str] , image_processor , tokenizer , qformer_tokenizer ) -> None:
        super().__init__(image_processor , tokenizer )
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__( self: Any , images: ImageInput = None , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_token_type_ids: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text." )
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding.update(text_encoding )
            qformer_text_encoding = self.qformer_tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids" )
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask" )
        if images is not None:
            image_encoding = self.image_processor(images , return_tensors=return_tensors )
            encoding.update(image_encoding )
        return encoding
    def batch_decode( self: Any , *args , **kwargs ) -> Optional[Any]:
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self: Tuple , *args , **kwargs ) -> Tuple:
        return self.tokenizer.decode(*args , **kwargs )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names( self: List[str] ) -> Tuple:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    def save_pretrained( self: Union[str, Any] , save_directory , **kwargs ) -> str:
        if os.path.isfile(save_directory ):
            raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
        os.makedirs(save_directory , exist_ok=True )
        qformer_tokenizer_path = os.path.join(save_directory , "qformer_tokenizer" )
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path )
        return super().save_pretrained(save_directory , **kwargs )
    @classmethod
    def from_pretrained( cls: Tuple , pretrained_model_name_or_path , **kwargs ) -> Union[str, Any]:
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path , subfolder="qformer_tokenizer" )
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path , **kwargs )
        args.append(qformer_tokenizer )
        return cls(*args )
| 342 | 0 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
_snake_case = {
'''facebook/maskformer-swin-base-ade''': (
'''https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'''
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
_snake_case = logging.get_logger(__name__)
class _snake_case ( PretrainedConfig ):
    model_type = '''maskformer'''
    attribute_map = {'''hidden_size''': '''mask_feature_size'''}
    backbones_supported = ['''resnet''', '''swin''']
    decoders_supported = ['''detr''']
    def __init__( self , fpn_feature_size: int = 2_56 , mask_feature_size: int = 2_56 , no_object_weight: float = 0.1 , use_auxiliary_loss: bool = False , backbone_config: Optional[Dict] = None , decoder_config: Optional[Dict] = None , init_std: float = 0.02 , init_xavier_std: float = 1.0 , dice_weight: float = 1.0 , cross_entropy_weight: float = 1.0 , mask_weight: float = 20.0 , output_auxiliary_logits: Optional[bool] = None , **kwargs , ) -> None:
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=3_84 , in_channels=3 , patch_size=4 , embed_dim=1_28 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["stage1", "stage2", "stage3", "stage4"] , )
        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop("model_type" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '''
f'''Supported model types: {','.join(self.backbones_supported )}''' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type" ) if isinstance(decoder_config , dict ) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f'''Transformer Decoder {decoder_type} not supported, please use one of'''
                    f''' {','.join(self.decoders_supported )}''' )
            if isinstance(decoder_config , dict ):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config )
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs )
    @classmethod
    def from_backbone_and_decoder_configs( cls , backbone_config: PretrainedConfig , decoder_config: PretrainedConfig , **kwargs ) -> Union[str, Any]:
        return cls(
            backbone_config=backbone_config , decoder_config=decoder_config , **kwargs , )
    def to_dict( self ) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
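# Added sketch: constructing the config with its defaults (the class name is mangled
# here; upstream it is `MaskFormerConfig`):
#
#   from transformers import MaskFormerConfig
#   config = MaskFormerConfig()  # Swin backbone + DETR decoder fallbacks shown above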
| 361 | import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
_snake_case = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
_snake_case = {'''facebook/blenderbot-3B''': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode( ) -> Dict:
    bs = (
        list(range(ord("!" ), ord("~" ) + 1 ) ) + list(range(ord("¡" ), ord("¬" ) + 1 ) ) + list(range(ord("®" ), ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs, cs ) )
def get_pairs( word ) -> Any:
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class _snake_case ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ) -> None:
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding="utf-8" ) as merges_handle:
            bpe_merges = merges_handle.read().split("\n" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size( self: Dict ) -> Any:
        return len(self.encoder )
    def get_vocab( self: Optional[Any] ) -> List[str]:
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self: int , token: List[Any] ) -> Union[str, Any]:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        self.cache[token] = word
        return word
def _lowerCamelCase ( self: Dict , __lowerCamelCase: Optional[Any] ) -> Dict:
__UpperCAmelCase : Any = []
for token in re.findall(self.pat , __lowerCamelCase ):
__UpperCAmelCase : int = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) )
return bpe_tokens
def _lowerCamelCase ( self: int , __lowerCamelCase: str ) -> Dict:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[Any] ) -> List[str]:
return self.decoder.get(__lowerCamelCase )
def _lowerCamelCase ( self: Any , __lowerCamelCase: Any ) -> int:
__UpperCAmelCase : Dict = "".join(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCAmelCase : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__UpperCAmelCase : Dict = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" )
__UpperCAmelCase : Optional[Any] = 0
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
__UpperCAmelCase : Optional[Any] = token_index
writer.write(" ".join(__lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def _lowerCamelCase ( self: Dict , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None , __lowerCamelCase: bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase : int = [self.sep_token_id]
__UpperCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCamelCase ( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int ) -> List[Any]:
__UpperCAmelCase : Optional[Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()):
__UpperCAmelCase : Optional[Any] = " " + text
return (text, kwargs)
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[str]:
return token_ids_a + [self.eos_token_id]
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: "Conversation" ) -> List[int]:
__UpperCAmelCase : Tuple = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to prefix with a space, as is done within Blenderbot
inputs.append(" " + text )
else:
                # Generated responses already contain the leading space.
inputs.append(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = " ".join(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.encode(__lowerCamelCase )
if len(__lowerCamelCase ) > self.model_max_length:
__UpperCAmelCase : List[Any] = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
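# The merge loop at the top of this section implements greedy lowest-rank byte-pair
# merging, which is hard to follow with the obfuscated assignment targets. Below is a
# minimal, self-contained sketch of the same idea with a toy merge table; the helper
# names and the example ranks are illustrative assumptions, not the tokenizer's actual
# attributes.
def _toy_get_pairs(word):
    # Set of adjacent symbol pairs in the current word, e.g. ("l", "o") appears in "low".
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}

def _toy_bpe(token, ranks):
    word = tuple(token)
    pairs = _toy_get_pairs(word)
    while pairs:
        # Merge the pair with the lowest rank first; stop once no pair is mergeable.
        bigram = min(pairs, key=lambda pair: ranks.get(pair, float("inf")))
        if bigram not in ranks:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
        if len(word) == 1:
            break
        pairs = _toy_get_pairs(word)
    return " ".join(word)

# _toy_bpe("hello", {("h", "e"): 0, ("l", "l"): 1, ("he", "ll"): 2}) -> "hell o"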
| 342 | 0 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
_snake_case = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
_snake_case = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
_snake_case = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
_snake_case = {
"num_train_timesteps": 40,
"sigma_min": 0.0_0_2,
"sigma_max": 8_0.0,
}
_snake_case = {
"num_train_timesteps": 201,
"sigma_min": 0.0_0_2,
"sigma_max": 8_0.0,
}
_snake_case = {
"num_train_timesteps": 151,
"sigma_min": 0.0_0_2,
"sigma_max": 8_0.0,
}
def _UpperCamelCase ( snake_case__ ) -> Optional[Any]:
    if isinstance(snake_case__, bool ):
        return snake_case__
    if snake_case__.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif snake_case__.lower() in ("no", "false", "f", "n", "0"):
        return False
else:
raise argparse.ArgumentTypeError("boolean value expected" )
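# A quick sketch of how a string-to-bool converter like the one above is typically
# wired into argparse. argparse's own `type=bool` would treat any non-empty string as
# True, which is why a converter that raises ArgumentTypeError is used instead. The
# wrapper function below is a hypothetical example, not part of this script.
_strabool_example = _UpperCamelCase  # capture the converter before later redefinitions shadow the name

def _example_bool_flag_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument("--class_cond", type=_strabool_example, default=True)
    return parser

# _example_bool_flag_parser().parse_args(["--class_cond", "false"]).class_cond -> False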
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__, snake_case__=False ) -> Dict:
__UpperCAmelCase : Dict = checkpoint[f'''{old_prefix}.in_layers.0.weight''']
__UpperCAmelCase : int = checkpoint[f'''{old_prefix}.in_layers.0.bias''']
__UpperCAmelCase : Tuple = checkpoint[f'''{old_prefix}.in_layers.2.weight''']
__UpperCAmelCase : Tuple = checkpoint[f'''{old_prefix}.in_layers.2.bias''']
__UpperCAmelCase : int = checkpoint[f'''{old_prefix}.emb_layers.1.weight''']
__UpperCAmelCase : str = checkpoint[f'''{old_prefix}.emb_layers.1.bias''']
__UpperCAmelCase : Optional[Any] = checkpoint[f'''{old_prefix}.out_layers.0.weight''']
__UpperCAmelCase : Tuple = checkpoint[f'''{old_prefix}.out_layers.0.bias''']
__UpperCAmelCase : Dict = checkpoint[f'''{old_prefix}.out_layers.3.weight''']
__UpperCAmelCase : List[Any] = checkpoint[f'''{old_prefix}.out_layers.3.bias''']
if has_skip:
__UpperCAmelCase : List[Any] = checkpoint[f'''{old_prefix}.skip_connection.weight''']
__UpperCAmelCase : Optional[Any] = checkpoint[f'''{old_prefix}.skip_connection.bias''']
return new_checkpoint
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__, snake_case__=None ) -> Optional[Any]:
__UpperCAmelCase : str = checkpoint[f'''{old_prefix}.qkv.weight'''].chunk(3, dim=0 )
__UpperCAmelCase : Dict = checkpoint[f'''{old_prefix}.qkv.bias'''].chunk(3, dim=0 )
__UpperCAmelCase : Optional[Any] = checkpoint[f'''{old_prefix}.norm.weight''']
__UpperCAmelCase : str = checkpoint[f'''{old_prefix}.norm.bias''']
__UpperCAmelCase : Union[str, Any] = weight_q.squeeze(-1 ).squeeze(-1 )
__UpperCAmelCase : List[Any] = bias_q.squeeze(-1 ).squeeze(-1 )
__UpperCAmelCase : Tuple = weight_k.squeeze(-1 ).squeeze(-1 )
__UpperCAmelCase : str = bias_k.squeeze(-1 ).squeeze(-1 )
__UpperCAmelCase : Optional[Any] = weight_v.squeeze(-1 ).squeeze(-1 )
__UpperCAmelCase : Optional[int] = bias_v.squeeze(-1 ).squeeze(-1 )
__UpperCAmelCase : List[Any] = (
checkpoint[f'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
)
__UpperCAmelCase : Optional[int] = checkpoint[f'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
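# The repeated `.squeeze(-1).squeeze(-1)` calls above turn 1x1-convolution weights of
# shape (out, in, 1, 1) into linear-layer weights of shape (out, in), and `.chunk(3, dim=0)`
# splits a fused qkv projection into its three parts. A minimal sketch of both
# (function names and shapes are illustrative):
def _example_conv1x1_to_linear(conv_weight):
    # (out_channels, in_channels, 1, 1) -> (out_channels, in_channels)
    return conv_weight.squeeze(-1).squeeze(-1)

def _example_split_qkv(qkv_weight):
    # (3 * dim, dim, 1, 1) -> three tensors of shape (dim, dim)
    q, k, v = qkv_weight.chunk(3, dim=0)
    return tuple(t.squeeze(-1).squeeze(-1) for t in (q, k, v))

# _example_conv1x1_to_linear(torch.randn(8, 4, 1, 1)).shape -> torch.Size([8, 4])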
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Union[str, Any]:
__UpperCAmelCase : List[Any] = torch.load(__lowerCAmelCase, map_location="cpu" )
__UpperCAmelCase : Any = {}
__UpperCAmelCase : List[str] = checkpoint["""time_embed.0.weight"""]
__UpperCAmelCase : List[str] = checkpoint["""time_embed.0.bias"""]
__UpperCAmelCase : Any = checkpoint["""time_embed.2.weight"""]
__UpperCAmelCase : Dict = checkpoint["""time_embed.2.bias"""]
if unet_config["num_class_embeds"] is not None:
__UpperCAmelCase : List[str] = checkpoint["""label_emb.weight"""]
__UpperCAmelCase : List[str] = checkpoint["""input_blocks.0.0.weight"""]
__UpperCAmelCase : Dict = checkpoint["""input_blocks.0.0.bias"""]
__UpperCAmelCase : int = unet_config["""down_block_types"""]
__UpperCAmelCase : Dict = unet_config["""layers_per_block"""]
__UpperCAmelCase : List[Any] = unet_config["""attention_head_dim"""]
__UpperCAmelCase : Union[str, Any] = unet_config["""block_out_channels"""]
__UpperCAmelCase : Optional[Any] = 1
__UpperCAmelCase : str = channels_list[0]
for i, layer_type in enumerate(__lowerCAmelCase ):
__UpperCAmelCase : Any = channels_list[i]
__UpperCAmelCase : int = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(__lowerCAmelCase ):
__UpperCAmelCase : Tuple = f'''down_blocks.{i}.resnets.{j}'''
__UpperCAmelCase : Tuple = f'''input_blocks.{current_layer}.0'''
__UpperCAmelCase : Tuple = True if j == 0 and downsample_block_has_skip else False
__UpperCAmelCase : List[Any] = convert_resnet(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, has_skip=__lowerCAmelCase )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(__lowerCAmelCase ):
__UpperCAmelCase : Any = f'''down_blocks.{i}.resnets.{j}'''
__UpperCAmelCase : List[Any] = f'''input_blocks.{current_layer}.0'''
__UpperCAmelCase : Any = True if j == 0 and downsample_block_has_skip else False
__UpperCAmelCase : Any = convert_resnet(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, has_skip=__lowerCAmelCase )
__UpperCAmelCase : List[str] = f'''down_blocks.{i}.attentions.{j}'''
__UpperCAmelCase : List[Any] = f'''input_blocks.{current_layer}.1'''
__UpperCAmelCase : Optional[int] = convert_attention(
__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase )
current_layer += 1
if i != len(__lowerCAmelCase ) - 1:
__UpperCAmelCase : Any = f'''down_blocks.{i}.downsamplers.0'''
__UpperCAmelCase : Any = f'''input_blocks.{current_layer}.0'''
__UpperCAmelCase : Union[str, Any] = convert_resnet(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase )
current_layer += 1
__UpperCAmelCase : Any = current_channels
# hardcoded the mid-block for now
__UpperCAmelCase : int = """mid_block.resnets.0"""
__UpperCAmelCase : int = """middle_block.0"""
__UpperCAmelCase : Optional[int] = convert_resnet(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase )
__UpperCAmelCase : Tuple = """mid_block.attentions.0"""
__UpperCAmelCase : int = """middle_block.1"""
__UpperCAmelCase : Optional[int] = convert_attention(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase )
__UpperCAmelCase : Union[str, Any] = """mid_block.resnets.1"""
__UpperCAmelCase : Tuple = """middle_block.2"""
__UpperCAmelCase : Optional[Any] = convert_resnet(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase )
__UpperCAmelCase : Optional[int] = 0
__UpperCAmelCase : List[str] = unet_config["""up_block_types"""]
for i, layer_type in enumerate(__lowerCAmelCase ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
__UpperCAmelCase : List[Any] = f'''up_blocks.{i}.resnets.{j}'''
__UpperCAmelCase : str = f'''output_blocks.{current_layer}.0'''
__UpperCAmelCase : Tuple = convert_resnet(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, has_skip=__lowerCAmelCase )
current_layer += 1
if i != len(__lowerCAmelCase ) - 1:
__UpperCAmelCase : int = f'''up_blocks.{i}.upsamplers.0'''
__UpperCAmelCase : Dict = f'''output_blocks.{current_layer-1}.1'''
__UpperCAmelCase : Optional[Any] = convert_resnet(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
__UpperCAmelCase : List[Any] = f'''up_blocks.{i}.resnets.{j}'''
__UpperCAmelCase : List[Any] = f'''output_blocks.{current_layer}.0'''
__UpperCAmelCase : Any = convert_resnet(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, has_skip=__lowerCAmelCase )
__UpperCAmelCase : Optional[int] = f'''up_blocks.{i}.attentions.{j}'''
__UpperCAmelCase : int = f'''output_blocks.{current_layer}.1'''
__UpperCAmelCase : Union[str, Any] = convert_attention(
__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase )
current_layer += 1
if i != len(__lowerCAmelCase ) - 1:
__UpperCAmelCase : Optional[int] = f'''up_blocks.{i}.upsamplers.0'''
__UpperCAmelCase : Any = f'''output_blocks.{current_layer-1}.2'''
__UpperCAmelCase : int = convert_resnet(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase )
__UpperCAmelCase : int = checkpoint["""out.0.weight"""]
__UpperCAmelCase : Union[str, Any] = checkpoint["""out.0.bias"""]
__UpperCAmelCase : List[Any] = checkpoint["""out.2.weight"""]
__UpperCAmelCase : Optional[int] = checkpoint["""out.2.bias"""]
return new_checkpoint
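# A hedged sanity check for the conversion above: load the remapped state dict into a
# freshly built UNet with strict=True so any missing or unexpected key is reported
# immediately. `UNetaDModel` is the UNet class imported at the top of this script; the
# checker function itself is illustrative, not part of the original converter.
_convert_checkpoint_example = _UpperCamelCase  # capture the converter defined directly above

def _example_check_conversion(unet_path, unet_config):
    converted = _convert_checkpoint_example(unet_path, unet_config)
    model = UNetaDModel(**unet_config)
    model.load_state_dict(converted, strict=True)  # raises on any key mismatch
    return model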
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
_snake_case = parser.parse_args()
_snake_case = strabool(args.class_cond)
_snake_case = os.path.basename(args.unet_path)
print(F'Checkpoint: {ckpt_name}')
# Get U-Net config
if "imagenet64" in ckpt_name:
_snake_case = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_snake_case = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
_snake_case = TEST_UNET_CONFIG
else:
raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.')
if not args.class_cond:
_snake_case = None
_snake_case = con_pt_to_diffuser(args.unet_path, unet_config)
_snake_case = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
_snake_case = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
_snake_case = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_snake_case = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.')
_snake_case = CMStochasticIterativeScheduler(**scheduler_config)
_snake_case = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 362 | import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: List[Any] = CanineTokenizer
lowerCamelCase__: Optional[int] = False
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
super().setUp()
__UpperCAmelCase : Tuple = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowerCamelCase ( self: Union[str, Any] ) -> List[Any]:
return CanineTokenizer.from_pretrained("google/canine-s" )
def _lowerCamelCase ( self: Any , **__lowerCamelCase: List[Any] ) -> CanineTokenizer:
__UpperCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
__UpperCAmelCase : Optional[int] = 10_24
return tokenizer
@require_torch
def _lowerCamelCase ( self: List[str] ) -> int:
__UpperCAmelCase : Union[str, Any] = self.canine_tokenizer
__UpperCAmelCase : List[str] = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
__UpperCAmelCase : Dict = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
# fmt: on
__UpperCAmelCase : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Optional[Any] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
__UpperCAmelCase : Optional[Any] = self.canine_tokenizer
        __UpperCAmelCase : Dict = ["Once there was a man.", "He wrote a test in HuggingFace Transformers."]
__UpperCAmelCase : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , __lowerCamelCase )
self.assertIn("attention_mask" , __lowerCamelCase )
self.assertIn("token_type_ids" , __lowerCamelCase )
@require_torch
def _lowerCamelCase ( self: Any ) -> List[str]:
__UpperCAmelCase : Optional[Any] = self.canine_tokenizer
        __UpperCAmelCase : int = [
            "What's the weather?",
            "It's about 25 degrees.",
        ]
__UpperCAmelCase : List[Any] = tokenizer(
text_target=__lowerCamelCase , max_length=32 , padding="max_length" , truncation=__lowerCamelCase , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
        # Safety check on the model_max_length default value, so we are sure the test works
__UpperCAmelCase : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__UpperCAmelCase : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCAmelCase : int = tempfile.mkdtemp()
__UpperCAmelCase : List[Any] = " He is very happy, UNwant\u00E9d,running"
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
__UpperCAmelCase : Tuple = tokenizer.__class__.from_pretrained(__lowerCamelCase )
__UpperCAmelCase : Dict = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
shutil.rmtree(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCAmelCase : List[Any] = tempfile.mkdtemp()
__UpperCAmelCase : Optional[int] = " He is very happy, UNwant\u00E9d,running"
__UpperCAmelCase : str = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
__UpperCAmelCase : Tuple = chr(0xE_0_0_7 )
additional_special_tokens.append(__lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
__UpperCAmelCase : Optional[int] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
__UpperCAmelCase : str = tokenizer.__class__.from_pretrained(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertIn(__lowerCamelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__UpperCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(__lowerCamelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__lowerCamelCase )
def _lowerCamelCase ( self: str ) -> Optional[int]:
__UpperCAmelCase : List[Any] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.get_clean_sequence(__lowerCamelCase )
# a special token for Canine can be defined as follows:
__UpperCAmelCase : int = 0xE_0_0_5
__UpperCAmelCase : Tuple = chr(__lowerCamelCase )
tokenizer.add_special_tokens({"cls_token": special_token} )
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , 1 )
__UpperCAmelCase : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : Dict = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : int = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , input_encoded + special_token_id )
__UpperCAmelCase : Optional[int] = tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
self.assertTrue(special_token not in decoded )
def _lowerCamelCase ( self: Optional[int] ) -> Optional[Any]:
__UpperCAmelCase : List[str] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : Optional[int] = chr(0xE_0_0_5 )
__UpperCAmelCase : List[str] = chr(0xE_0_0_6 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__lowerCamelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
__UpperCAmelCase : Tuple = tokenizer.tokenize(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , 1 )
self.assertEqual(len(__lowerCamelCase ) , 1 )
self.assertEqual(token_a[0] , __lowerCamelCase )
self.assertEqual(token_a[0] , __lowerCamelCase )
@require_tokenizers
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
__UpperCAmelCase : Any = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
__UpperCAmelCase : Union[str, Any] = 0xE_0_0_6
__UpperCAmelCase : int = chr(__lowerCamelCase )
__UpperCAmelCase : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__lowerCamelCase )
tokenizer.from_pretrained(__lowerCamelCase )
def _lowerCamelCase ( self: Dict ) -> List[str]:
__UpperCAmelCase : str = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
__UpperCAmelCase : Tuple = json.load(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
__UpperCAmelCase : Optional[int] = json.load(__lowerCamelCase )
# a special token for Canine can be defined as follows:
__UpperCAmelCase : Any = 0xE_0_0_6
__UpperCAmelCase : Union[str, Any] = chr(__lowerCamelCase )
__UpperCAmelCase : Dict = [new_token_a]
__UpperCAmelCase : int = [new_token_a]
with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__UpperCAmelCase : List[str] = tokenizer_class.from_pretrained(__lowerCamelCase , extra_ids=0 )
self.assertIn(__lowerCamelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
__UpperCAmelCase : List[Any] = 0xE_0_0_7
__UpperCAmelCase : List[Any] = chr(__lowerCamelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__UpperCAmelCase : str = [AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase )]
__UpperCAmelCase : Dict = tokenizer_class.from_pretrained(
__lowerCamelCase , additional_special_tokens=__lowerCamelCase , extra_ids=0 )
self.assertIn(__lowerCamelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : Optional[int] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : int = "hello world"
if self.space_between_special_tokens:
__UpperCAmelCase : Any = "[CLS] hello world [SEP]"
else:
__UpperCAmelCase : Union[str, Any] = input
__UpperCAmelCase : List[Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : Any = tokenizer.decode(__lowerCamelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__lowerCamelCase , [output, output.lower()] )
def _lowerCamelCase ( self: Dict ) -> Any:
__UpperCAmelCase : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : List[str] = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
__UpperCAmelCase : List[str] = "a"
__UpperCAmelCase : Any = ord(__lowerCamelCase )
for attr in attributes_list:
setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase )
setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase )
setattr(__lowerCamelCase , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [] )
__UpperCAmelCase : Tuple = 0xE_0_0_6
__UpperCAmelCase : Optional[Any] = chr(__lowerCamelCase )
setattr(__lowerCamelCase , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
pass
def _lowerCamelCase ( self: Any ) -> Any:
pass
def _lowerCamelCase ( self: Union[str, Any] ) -> Tuple:
pass
def _lowerCamelCase ( self: Optional[int] ) -> Any:
pass
def _lowerCamelCase ( self: List[str] ) -> str:
pass
def _lowerCamelCase ( self: Union[str, Any] ) -> Optional[int]:
pass
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
pass
def _lowerCamelCase ( self: str ) -> Tuple:
pass
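# CANINE tokenizes at the character level: apart from special markers such as
# [CLS] (0xE000) and [SEP] (0xE001), each input id is simply the Unicode code point of
# one character, which is what the hardcoded id list in the first test above shows.
# A short, hedged round-trip sketch (checkpoint name as used in the tests; running
# this downloads the tokenizer):
def _example_canine_roundtrip():
    tokenizer = CanineTokenizer.from_pretrained("google/canine-s")
    encoding = tokenizer("Life is like a box of chocolates.")
    assert encoding["input_ids"][1] == ord("L")  # 76, matching the expected ids above
    return tokenizer.decode(encoding["input_ids"], skip_special_tokens=True)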
| 342 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
_snake_case = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class _snake_case ( _A ):
lowerCamelCase__: List[str] = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
lowerCamelCase__: int = "nezha"
def __init__( self: Any , __lowerCamelCase: Optional[int]=2_11_28 , __lowerCamelCase: Dict=7_68 , __lowerCamelCase: Optional[int]=12 , __lowerCamelCase: Dict=12 , __lowerCamelCase: List[str]=30_72 , __lowerCamelCase: Optional[int]="gelu" , __lowerCamelCase: List[str]=0.1 , __lowerCamelCase: List[str]=0.1 , __lowerCamelCase: List[Any]=5_12 , __lowerCamelCase: Optional[Any]=64 , __lowerCamelCase: str=2 , __lowerCamelCase: Tuple=0.02 , __lowerCamelCase: Dict=1e-12 , __lowerCamelCase: Any=0.1 , __lowerCamelCase: Any=0 , __lowerCamelCase: Optional[Any]=2 , __lowerCamelCase: Union[str, Any]=3 , __lowerCamelCase: str=True , **__lowerCamelCase: Optional[Any] , ) -> int:
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__UpperCAmelCase : Optional[int] = vocab_size
__UpperCAmelCase : Dict = hidden_size
__UpperCAmelCase : Optional[int] = num_hidden_layers
__UpperCAmelCase : Union[str, Any] = num_attention_heads
__UpperCAmelCase : Any = hidden_act
__UpperCAmelCase : Tuple = intermediate_size
__UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
__UpperCAmelCase : List[str] = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = max_position_embeddings
__UpperCAmelCase : str = max_relative_position
__UpperCAmelCase : Union[str, Any] = type_vocab_size
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : List[str] = layer_norm_eps
__UpperCAmelCase : Optional[Any] = classifier_dropout
__UpperCAmelCase : Dict = use_cache
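# The class above mirrors transformers' NezhaConfig. A hedged usage sketch against the
# library's public API (values are the defaults listed in __init__; nothing is
# downloaded here):
def _example_nezha_config():
    from transformers import NezhaConfig
    config = NezhaConfig(hidden_size=768, num_hidden_layers=12, max_relative_position=64)
    return config.hidden_size, config.max_relative_position  # -> (768, 64)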
| 363 | import logging
import os
from .state import PartialState
class _snake_case ( logging.LoggerAdapter ):
@staticmethod
def _lowerCamelCase ( __lowerCamelCase: Any ) -> int:
__UpperCAmelCase : str = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[Any] , *__lowerCamelCase: List[str] , **__lowerCamelCase: List[Any] ) -> Optional[int]:
if PartialState._shared_state == {}:
raise RuntimeError(
"You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." )
__UpperCAmelCase : Any = kwargs.pop("main_process_only" , __lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = kwargs.pop("in_order" , __lowerCamelCase )
if self.isEnabledFor(__lowerCamelCase ):
if self._should_log(__lowerCamelCase ):
__UpperCAmelCase , __UpperCAmelCase : int = self.process(__lowerCamelCase , __lowerCamelCase )
self.logger.log(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
elif in_order:
__UpperCAmelCase : Optional[int] = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self.process(__lowerCamelCase , __lowerCamelCase )
self.logger.log(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
state.wait_for_everyone()
def _UpperCamelCase ( snake_case__, snake_case__ = None ) -> List[str]:
if log_level is None:
__UpperCAmelCase : List[Any] = os.environ.get("ACCELERATE_LOG_LEVEL", snake_case__ )
__UpperCAmelCase : Union[str, Any] = logging.getLogger(snake_case__ )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(snake_case__, {} )
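# A hedged usage sketch of the factory above, written against accelerate's public API
# (the state must be initialized, e.g. by constructing an Accelerator, before logging):
def _example_multiprocess_logging():
    from accelerate import Accelerator
    from accelerate.logging import get_logger  # the library version of the factory above
    accelerator = Accelerator()  # initializes PartialState so the adapter can log
    logger = get_logger(__name__, log_level="INFO")
    logger.info("printed once, on the main process", main_process_only=True)
    logger.info("printed by every process, in rank order", main_process_only=False, in_order=True)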
| 342 | 0 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
_snake_case = True
except ImportError:
_snake_case = False
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
def _UpperCamelCase ( snake_case__ ) -> Tuple:
return AddNewModelCommand(args.testing, args.testing_file, path=args.path )
class _snake_case ( __SCREAMING_SNAKE_CASE ):
@staticmethod
def _lowerCamelCase ( __lowerCamelCase: ArgumentParser ) -> List[str]:
__UpperCAmelCase : int = parser.add_parser("add-new-model" )
add_new_model_parser.add_argument("--testing" , action="store_true" , help="If in testing mode." )
add_new_model_parser.add_argument("--testing_file" , type=_SCREAMING_SNAKE_CASE , help="Configuration file on which to run." )
add_new_model_parser.add_argument(
"--path" , type=_SCREAMING_SNAKE_CASE , help="Path to cookiecutter. Should only be used for testing purposes." )
add_new_model_parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
def __init__( self: List[Any] , __lowerCamelCase: bool , __lowerCamelCase: str , __lowerCamelCase: List[str]=None , *__lowerCamelCase: Dict ) -> List[str]:
__UpperCAmelCase : int = testing
__UpperCAmelCase : str = testing_file
__UpperCAmelCase : Union[str, Any] = path
def _lowerCamelCase ( self: Optional[int] ) -> Optional[Any]:
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead." )
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
__UpperCAmelCase : List[str] = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
if len(_SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory." )
__UpperCAmelCase : List[str] = (
Path(_SCREAMING_SNAKE_CASE ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
__UpperCAmelCase : Tuple = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
if not self._testing:
cookiecutter(str(_SCREAMING_SNAKE_CASE ) )
else:
with open(self._testing_file , "r" ) as configuration_file:
__UpperCAmelCase : List[Any] = json.load(_SCREAMING_SNAKE_CASE )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=_SCREAMING_SNAKE_CASE , extra_context=_SCREAMING_SNAKE_CASE , )
__UpperCAmelCase : str = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
# Retrieve configuration
with open(directory + "/configuration.json" , "r" ) as configuration_file:
__UpperCAmelCase : Union[str, Any] = json.load(_SCREAMING_SNAKE_CASE )
__UpperCAmelCase : Optional[int] = configuration["lowercase_modelname"]
__UpperCAmelCase : List[str] = configuration["generate_tensorflow_pytorch_and_flax"]
os.remove(f'''{directory}/configuration.json''' )
__UpperCAmelCase : Optional[Any] = "PyTorch" in generate_tensorflow_pytorch_and_flax
__UpperCAmelCase : int = "TensorFlow" in generate_tensorflow_pytorch_and_flax
__UpperCAmelCase : Optional[int] = "Flax" in generate_tensorflow_pytorch_and_flax
__UpperCAmelCase : int = f'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'''
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
os.makedirs(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''' , exist_ok=_SCREAMING_SNAKE_CASE )
# Tests require submodules as they have parent imports
with open(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , "w" ):
pass
shutil.move(
f'''{directory}/__init__.py''' , f'''{model_dir}/__init__.py''' , )
shutil.move(
f'''{directory}/configuration_{lowercase_model_name}.py''' , f'''{model_dir}/configuration_{lowercase_model_name}.py''' , )
def remove_copy_lines(__lowerCamelCase: str ):
with open(_SCREAMING_SNAKE_CASE , "r" ) as f:
__UpperCAmelCase : Dict = f.readlines()
with open(_SCREAMING_SNAKE_CASE , "w" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(_SCREAMING_SNAKE_CASE )
if output_pytorch:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_{lowercase_model_name}.py''' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_tf_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' )
if output_flax:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_flax_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/{lowercase_model_name}.md''' , f'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , )
shutil.move(
f'''{directory}/tokenization_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(__lowerCamelCase: str , __lowerCamelCase: str , __lowerCamelCase: List[str] ):
# Create temp file
__UpperCAmelCase : Optional[int] = mkstemp()
__UpperCAmelCase : Dict = False
with fdopen(_SCREAMING_SNAKE_CASE , "w" ) as new_file:
with open(_SCREAMING_SNAKE_CASE ) as old_file:
for line in old_file:
new_file.write(_SCREAMING_SNAKE_CASE )
if line_to_copy_below in line:
__UpperCAmelCase : Optional[int] = True
for line_to_copy in lines_to_copy:
new_file.write(_SCREAMING_SNAKE_CASE )
if not line_found:
raise ValueError(f'''Line {line_to_copy_below} was not found in file.''' )
# Copy the file permissions from the old file to the new file
copymode(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Remove original file
remove(_SCREAMING_SNAKE_CASE )
# Move new file
move(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def skip_units(__lowerCamelCase: List[str] ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(__lowerCamelCase: List[str] ):
with open(_SCREAMING_SNAKE_CASE ) as datafile:
__UpperCAmelCase : int = []
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : str = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
__UpperCAmelCase : Optional[int] = line.split("\"" )[1]
__UpperCAmelCase : Tuple = skip_units(_SCREAMING_SNAKE_CASE )
elif "# Below: " in line and "##" not in line:
__UpperCAmelCase : List[str] = line.split("\"" )[1]
__UpperCAmelCase : str = skip_units(_SCREAMING_SNAKE_CASE )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__UpperCAmelCase : Union[str, Any] = []
elif "# Replace with" in line and "##" not in line:
__UpperCAmelCase : List[Any] = []
elif "##" not in line:
lines_to_copy.append(_SCREAMING_SNAKE_CASE )
remove(_SCREAMING_SNAKE_CASE )
replace_in_files(f'''{directory}/to_replace_{lowercase_model_name}.py''' )
os.rmdir(_SCREAMING_SNAKE_CASE )
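# The nested `replace` helper above uses a common safe in-place edit pattern: write to
# a temp file, copy the original file's permissions onto it, then move it over the
# original. A standalone sketch of that pattern (the function name is illustrative):
def _example_inplace_edit(path, transform):
    from os import fdopen, remove
    from shutil import copymode, move
    from tempfile import mkstemp
    fd, tmp_path = mkstemp()
    with fdopen(fd, "w") as new_file, open(path) as old_file:
        for line in old_file:
            new_file.write(transform(line))
    copymode(path, tmp_path)  # preserve the original file's mode bits
    remove(path)
    move(tmp_path, path)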
| 364 | from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _snake_case ( _lowercase ):
def __init__( self: Optional[Any] , __lowerCamelCase: NestedDataStructureLike[PathLike] , __lowerCamelCase: Optional[NamedSplit] = None , __lowerCamelCase: Optional[Features] = None , __lowerCamelCase: str = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: Optional[int] = None , **__lowerCamelCase: Tuple , ) -> str:
super().__init__(
__lowerCamelCase , split=__lowerCamelCase , features=__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase , streaming=__lowerCamelCase , num_proc=__lowerCamelCase , **__lowerCamelCase , )
__UpperCAmelCase : Union[str, Any] = path_or_paths if isinstance(__lowerCamelCase , __lowerCamelCase ) else {self.split: path_or_paths}
__UpperCAmelCase : int = Text(
cache_dir=__lowerCamelCase , data_files=__lowerCamelCase , features=__lowerCamelCase , **__lowerCamelCase , )
def _lowerCamelCase ( self: List[Any] ) -> Optional[Any]:
# Build iterable dataset
if self.streaming:
__UpperCAmelCase : List[str] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__UpperCAmelCase : Any = None
__UpperCAmelCase : Any = None
__UpperCAmelCase : Dict = None
__UpperCAmelCase : str = None
self.builder.download_and_prepare(
download_config=__lowerCamelCase , download_mode=__lowerCamelCase , verification_mode=__lowerCamelCase , base_path=__lowerCamelCase , num_proc=self.num_proc , )
__UpperCAmelCase : Dict = self.builder.as_dataset(
split=self.split , verification_mode=__lowerCamelCase , in_memory=self.keep_in_memory )
return dataset
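# The reader above is what backs `datasets.load_dataset("text", ...)`. A hedged usage
# sketch (the file path is a placeholder):
def _example_load_text():
    from datasets import load_dataset
    dataset = load_dataset("text", data_files={"train": "my_corpus.txt"})
    # Each line of the file becomes one example with a single "text" column.
    return dataset["train"][0]["text"]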
| 342 | 0 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
_snake_case = 250004
_snake_case = 250020
@require_sentencepiece
@require_tokenizers
class _snake_case ( __snake_case , unittest.TestCase ):
lowerCamelCase__: Optional[Any] = MBartaaTokenizer
lowerCamelCase__: str = MBartaaTokenizerFast
lowerCamelCase__: Tuple = True
lowerCamelCase__: List[str] = True
def _lowerCamelCase ( self: Optional[int] ) -> Optional[int]:
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCAmelCase : Union[str, Any] = MBartaaTokenizer(lowerCamelCase_ , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=lowerCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
__UpperCAmelCase : Tuple = """<s>"""
__UpperCAmelCase : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def _lowerCamelCase ( self: Dict ) -> Tuple:
__UpperCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(lowerCamelCase_ ) , 10_54 )
def _lowerCamelCase ( self: Optional[Any] ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 10_54 )
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
__UpperCAmelCase : Any = MBartaaTokenizer(lowerCamelCase_ , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=lowerCamelCase_ )
__UpperCAmelCase : Dict = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCamelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__UpperCAmelCase : Tuple = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCamelCase_ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
__UpperCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__UpperCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
@slow
def _lowerCamelCase ( self: Optional[int] ) -> Union[str, Any]:
# fmt: off
__UpperCAmelCase : Dict = {"""input_ids""": [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name="facebook/mbart-large-50" , revision="d3913889c59cd5c9e456b269c376325eabad57e2" , )
def _lowerCamelCase ( self: Dict ) -> Union[str, Any]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__UpperCAmelCase : Optional[int] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart50""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
__UpperCAmelCase : Dict = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
__UpperCAmelCase : int = tempfile.mkdtemp()
__UpperCAmelCase : Any = tokenizer_r.save_pretrained(lowerCamelCase_ )
__UpperCAmelCase : Any = tokenizer_p.save_pretrained(lowerCamelCase_ )
                # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
__UpperCAmelCase : Any = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
__UpperCAmelCase : List[Any] = tokenizer_r.from_pretrained(lowerCamelCase_ )
__UpperCAmelCase : Optional[Any] = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=True
__UpperCAmelCase : Union[str, Any] = tempfile.mkdtemp()
__UpperCAmelCase : int = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
__UpperCAmelCase : str = tokenizer_p.save_pretrained(lowerCamelCase_ )
                # Checks it saves with the same files
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
__UpperCAmelCase : List[str] = tokenizer_r.from_pretrained(lowerCamelCase_ )
__UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=False
__UpperCAmelCase : Optional[Any] = tempfile.mkdtemp()
__UpperCAmelCase : Dict = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
__UpperCAmelCase : Tuple = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__UpperCAmelCase : int = tokenizer_r.from_pretrained(lowerCamelCase_ )
__UpperCAmelCase : Union[str, Any] = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
lowerCamelCase__: List[Any] = "facebook/mbart-large-50-one-to-many-mmt"
lowerCamelCase__: Tuple = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
lowerCamelCase__: int = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
lowerCamelCase__: Optional[int] = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2]
@classmethod
def _lowerCamelCase ( cls: Union[str, Any] ) -> Union[str, Any]:
__UpperCAmelCase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" )
__UpperCAmelCase : str = 1
return cls
def _lowerCamelCase ( self: Optional[Any] ) -> int:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 25_00_20 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"] , 25_00_38 )
def _lowerCamelCase ( self: Any ) -> str:
__UpperCAmelCase : str = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ )
def _lowerCamelCase ( self: Optional[int] ) -> int:
self.assertIn(lowerCamelCase_ , self.tokenizer.all_special_ids )
__UpperCAmelCase : List[str] = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
__UpperCAmelCase : Tuple = self.tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
__UpperCAmelCase : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertNotIn(self.tokenizer.eos_token , lowerCamelCase_ )
def _lowerCamelCase ( self: str ) -> Optional[Any]:
__UpperCAmelCase : Optional[int] = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , lowerCamelCase_ )
__UpperCAmelCase : Optional[Any] = 10
__UpperCAmelCase : Optional[int] = self.tokenizer(lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ ).input_ids[0]
self.assertEqual(ids[0] , lowerCamelCase_ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
def _lowerCamelCase ( self: Optional[int] ) -> Union[str, Any]:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [25_00_53, 25_00_01] )
def _lowerCamelCase ( self: Dict ) -> List[str]:
__UpperCAmelCase : List[str] = tempfile.mkdtemp()
__UpperCAmelCase : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase_ )
__UpperCAmelCase : Any = MBartaaTokenizer.from_pretrained(lowerCamelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCamelCase_ )
@require_torch
def _lowerCamelCase ( self: Union[str, Any] ) -> List[Any]:
__UpperCAmelCase : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , return_tensors="pt" )
__UpperCAmelCase : Union[str, Any] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def _lowerCamelCase ( self: List[Any] ) -> str:
__UpperCAmelCase : int = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
__UpperCAmelCase : Union[str, Any] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
__UpperCAmelCase : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def _lowerCamelCase ( self: int ) -> str:
__UpperCAmelCase : str = self.tokenizer(self.src_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=3 , return_tensors="pt" )
__UpperCAmelCase : List[str] = self.tokenizer(
text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=10 , return_tensors="pt" )
__UpperCAmelCase : Optional[int] = targets["""input_ids"""]
__UpperCAmelCase : str = shift_tokens_right(lowerCamelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _lowerCamelCase ( self: List[str] ) -> Any:
__UpperCAmelCase : Tuple = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , {
# en_XX, A, test, EOS
"input_ids": [[25_00_04, 62, 30_34, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 25_00_01,
} , )
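# The tests above rely on shift_tokens_right to build decoder inputs from labels.
# A minimal sketch of that behavior, inferred from the assertions (the last
# non-pad token, the EOS, is wrapped around to position 0); the real helper is
# imported by this test from the library, and pad_token_id=1 below is a toy value.
import torch

def shift_tokens_right_sketch(input_ids: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    shifted = input_ids.clone()
    # index of the last non-padding token in each row
    eos_idx = (shifted.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    start_tokens = shifted.gather(1, eos_idx).squeeze(-1)
    shifted[:, 1:] = shifted[:, :-1].clone()
    shifted[:, 0] = start_tokens
    return shifted

labels = torch.tensor([[884, 9019, 2]])  # "... <eos>"
print(shift_tokens_right_sketch(labels, pad_token_id=1).tolist())  # [[2, 884, 9019]]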
| 365 | from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
'''configuration_trajectory_transformer''': [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TrajectoryTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrajectoryTransformerModel''',
'''TrajectoryTransformerPreTrainedModel''',
'''load_tf_weights_in_trajectory_transformer''',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
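# The block above is the standard lazy-import pattern: nothing under the `else:`
# branches is imported at module load time. A toy sketch of the idea (names are
# illustrative; this is not the real _LazyModule implementation):
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute back to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # import the defining submodule only on first access, then cache the value
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)
        return value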
| 342 | 0 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_snake_case = logging.get_logger(__name__)
class _snake_case ( _lowercase ):
lowerCamelCase__: Optional[Any] = ["""input_features""", """is_longer"""]
def __init__( self: Tuple , __lowerCamelCase: List[Any]=64 , __lowerCamelCase: Any=4_80_00 , __lowerCamelCase: Optional[Any]=4_80 , __lowerCamelCase: List[str]=10 , __lowerCamelCase: Union[str, Any]=10_24 , __lowerCamelCase: int=0.0 , __lowerCamelCase: int=False , __lowerCamelCase: Optional[int] = 0 , __lowerCamelCase: Dict = 1_40_00 , __lowerCamelCase: Union[str, Any] = None , __lowerCamelCase: Dict = "fusion" , __lowerCamelCase: Optional[Any] = "repeatpad" , **__lowerCamelCase: List[Any] , ) -> Optional[int]:
super().__init__(
feature_size=_lowercase , sampling_rate=_lowercase , padding_value=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
__UpperCAmelCase : Tuple = top_db
__UpperCAmelCase : Tuple = truncation
__UpperCAmelCase : Any = padding
__UpperCAmelCase : Tuple = fft_window_size
__UpperCAmelCase : List[str] = (fft_window_size >> 1) + 1
__UpperCAmelCase : Any = hop_length
__UpperCAmelCase : List[str] = max_length_s
__UpperCAmelCase : Union[str, Any] = max_length_s * sampling_rate
__UpperCAmelCase : Optional[int] = sampling_rate
__UpperCAmelCase : Optional[int] = frequency_min
__UpperCAmelCase : List[str] = frequency_max
__UpperCAmelCase : int = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_lowercase , min_frequency=_lowercase , max_frequency=_lowercase , sampling_rate=_lowercase , norm=_lowercase , mel_scale="htk" , )
__UpperCAmelCase : Any = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_lowercase , min_frequency=_lowercase , max_frequency=_lowercase , sampling_rate=_lowercase , norm="slaney" , mel_scale="slaney" , )
def _lowerCamelCase ( self: Dict ) -> Dict[str, Any]:
__UpperCAmelCase : Any = copy.deepcopy(self.__dict__ )
__UpperCAmelCase : Union[str, Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Any , __lowerCamelCase: Tuple = None ) -> np.ndarray:
__UpperCAmelCase : str = spectrogram(
_lowercase , window_function(self.fft_window_size , "hann" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_lowercase , log_mel="dB" , )
return log_mel_spectrogram.T
def _lowerCamelCase ( self: int , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[int] ) -> Optional[Any]:
__UpperCAmelCase : str = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
__UpperCAmelCase : Dict = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
__UpperCAmelCase : int = [0]
# randomly choose index for each part
__UpperCAmelCase : List[str] = np.random.choice(ranges[0] )
__UpperCAmelCase : Union[str, Any] = np.random.choice(ranges[1] )
__UpperCAmelCase : Any = np.random.choice(ranges[2] )
__UpperCAmelCase : Optional[Any] = mel[idx_front : idx_front + chunk_frames, :]
__UpperCAmelCase : Optional[int] = mel[idx_middle : idx_middle + chunk_frames, :]
__UpperCAmelCase : Dict = mel[idx_back : idx_back + chunk_frames, :]
__UpperCAmelCase : List[str] = torch.tensor(mel[None, None, :] )
__UpperCAmelCase : Dict = torch.nn.functional.interpolate(
_lowercase , size=[chunk_frames, 64] , mode="bilinear" , align_corners=_lowercase )
__UpperCAmelCase : str = mel_shrink[0][0].numpy()
__UpperCAmelCase : Any = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def _lowerCamelCase ( self: Dict , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Tuple ) -> np.array:
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
__UpperCAmelCase : Any = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
__UpperCAmelCase : str = len(_lowercase ) - max_length
__UpperCAmelCase : Any = np.random.randint(0 , overflow + 1 )
__UpperCAmelCase : Union[str, Any] = waveform[idx : idx + max_length]
__UpperCAmelCase : Dict = self._np_extract_fbank_features(_lowercase , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
__UpperCAmelCase : Union[str, Any] = self._np_extract_fbank_features(_lowercase , self.mel_filters )
__UpperCAmelCase : int = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
__UpperCAmelCase : Optional[Any] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
__UpperCAmelCase : Tuple = np.stack([mel, mel, mel, mel] , axis=0 )
__UpperCAmelCase : Any = False
else:
__UpperCAmelCase : Optional[Any] = self._random_mel_fusion(_lowercase , _lowercase , _lowercase )
__UpperCAmelCase : Dict = True
else:
raise NotImplementedError(f'''data_truncating {truncation} not implemented''' )
else:
__UpperCAmelCase : int = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
__UpperCAmelCase : Union[str, Any] = int(max_length / len(_lowercase ) )
__UpperCAmelCase : Tuple = np.stack(np.tile(_lowercase , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
__UpperCAmelCase : str = int(max_length / len(_lowercase ) )
__UpperCAmelCase : Tuple = np.stack(np.tile(_lowercase , _lowercase ) )
__UpperCAmelCase : Tuple = np.pad(_lowercase , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0 )
if truncation == "fusion":
__UpperCAmelCase : str = self._np_extract_fbank_features(_lowercase , self.mel_filters )
__UpperCAmelCase : Optional[int] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
__UpperCAmelCase : Optional[int] = self._np_extract_fbank_features(_lowercase , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self: Dict , __lowerCamelCase: Dict , __lowerCamelCase: Tuple = None , __lowerCamelCase: int = None , __lowerCamelCase: str = None , __lowerCamelCase: int = None , __lowerCamelCase: List[str] = None , **__lowerCamelCase: Any , ) -> BatchFeature:
__UpperCAmelCase : List[str] = truncation if truncation is not None else self.truncation
__UpperCAmelCase : List[str] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
__UpperCAmelCase : Optional[Any] = isinstance(_lowercase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
__UpperCAmelCase : List[Any] = is_batched_numpy or (
isinstance(_lowercase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__UpperCAmelCase : Tuple = [np.asarray(_lowercase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_lowercase , np.ndarray ):
__UpperCAmelCase : str = np.asarray(_lowercase , dtype=np.floataa )
elif isinstance(_lowercase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__UpperCAmelCase : int = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__UpperCAmelCase : Any = [np.asarray(_lowercase )]
# convert to mel spectrogram, truncate and pad if needed.
__UpperCAmelCase : int = [
self._get_input_mel(_lowercase , max_length if max_length else self.nb_max_samples , _lowercase , _lowercase )
for waveform in raw_speech
]
__UpperCAmelCase : Optional[Any] = []
__UpperCAmelCase : Optional[Any] = []
for mel, longer in padded_inputs:
input_mel.append(_lowercase )
is_longer.append(_lowercase )
if truncation == "fusion" and sum(_lowercase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
__UpperCAmelCase : Optional[Any] = np.random.randint(0 , len(_lowercase ) )
__UpperCAmelCase : Union[str, Any] = True
if isinstance(input_mel[0] , _lowercase ):
__UpperCAmelCase : str = [np.asarray(_lowercase , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
__UpperCAmelCase : List[Any] = [[longer] for longer in is_longer]
__UpperCAmelCase : List[Any] = {"input_features": input_mel, "is_longer": is_longer}
__UpperCAmelCase : List[str] = BatchFeature(_lowercase )
if return_tensors is not None:
__UpperCAmelCase : Union[str, Any] = input_features.convert_to_tensors(_lowercase )
return input_features
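# A standalone re-implementation of the "repeatpad" branch above, to make the
# tiling arithmetic concrete (the function name is illustrative):
import numpy as np

def repeatpad_sketch(waveform: np.ndarray, max_length: int) -> np.ndarray:
    n_repeat = max_length // len(waveform)  # whole repetitions that fit
    tiled = np.tile(waveform, n_repeat)
    # zero-pad the remainder up to max_length
    return np.pad(tiled, (0, max_length - len(tiled)), mode="constant", constant_values=0)

print(repeatpad_sketch(np.ones(3, dtype=np.float32), 8))  # [1. 1. 1. 1. 1. 1. 0. 0.]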
| 366 | import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
def __init__( self: Tuple , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any]=13 , __lowerCamelCase: Optional[int]=32 , __lowerCamelCase: List[str]=3 , __lowerCamelCase: Dict=4 , __lowerCamelCase: Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase: int=[2, 2, 3, 2] , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Tuple=37 , __lowerCamelCase: Tuple="gelu" , __lowerCamelCase: List[Any]=10 , __lowerCamelCase: Optional[int]=0.02 , __lowerCamelCase: Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase: Optional[int]=[2, 3, 4] , __lowerCamelCase: int=None , ) -> List[str]:
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : Optional[int] = image_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : Union[str, Any] = num_stages
__UpperCAmelCase : List[str] = hidden_sizes
__UpperCAmelCase : Any = depths
__UpperCAmelCase : Optional[int] = is_training
__UpperCAmelCase : List[Any] = use_labels
__UpperCAmelCase : Optional[int] = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Union[str, Any] = num_labels
__UpperCAmelCase : Any = initializer_range
__UpperCAmelCase : List[str] = out_features
__UpperCAmelCase : Tuple = out_indices
__UpperCAmelCase : List[Any] = scope
def _lowerCamelCase ( self: List[Any] ) -> Optional[int]:
__UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : List[str] = None
if self.use_labels:
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self: Tuple ) -> List[Any]:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase : Optional[Any] = ConvNextVaModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : List[str] = model(__lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: Tuple ) -> Tuple:
__UpperCAmelCase : Union[str, Any] = ConvNextVaForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Optional[int] = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self: int , __lowerCamelCase: Any , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Any = model(__lowerCamelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__UpperCAmelCase : List[Any] = None
__UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Any = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowerCamelCase ( self: int ) -> List[str]:
__UpperCAmelCase : int = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs
__UpperCAmelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs
__UpperCAmelCase : Dict = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class _snake_case ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase__: Dict = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__: str = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__: Tuple = False
lowerCamelCase__: int = False
lowerCamelCase__: Dict = False
lowerCamelCase__: int = False
lowerCamelCase__: Any = False
def _lowerCamelCase ( self: Union[str, Any] ) -> Union[str, Any]:
__UpperCAmelCase : Union[str, Any] = ConvNextVaModelTester(self )
__UpperCAmelCase : str = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def _lowerCamelCase ( self: Dict ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self: List[Any] ) -> int:
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def _lowerCamelCase ( self: Any ) -> Any:
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def _lowerCamelCase ( self: str ) -> Optional[Any]:
pass
def _lowerCamelCase ( self: List[Any] ) -> int:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_with_labels()
__UpperCAmelCase : Optional[Any] = True
if model_class.__name__ in [
*get_values(MODEL_MAPPING_NAMES ),
*get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
]:
continue
__UpperCAmelCase : Optional[Any] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
__UpperCAmelCase : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
__UpperCAmelCase : Any = model(**__lowerCamelCase ).loss
loss.backward()
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = True
if (
model_class.__name__
in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
or not model_class.supports_gradient_checkpointing
):
continue
__UpperCAmelCase : int = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.gradient_checkpointing_enable()
model.train()
__UpperCAmelCase : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
__UpperCAmelCase : Any = model(**__lowerCamelCase ).loss
loss.backward()
def _lowerCamelCase ( self: List[str] ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : str = model_class(__lowerCamelCase )
__UpperCAmelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : List[Any] = [*signature.parameters.keys()]
__UpperCAmelCase : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _lowerCamelCase ( self: str ) -> List[Any]:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowerCamelCase ( self: Union[str, Any] ) -> Dict:
def check_hidden_states_output(__lowerCamelCase: Any , __lowerCamelCase: Tuple , __lowerCamelCase: str ):
__UpperCAmelCase : Any = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Tuple = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
__UpperCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[int] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Any = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def _lowerCamelCase ( self: Dict ) -> List[Any]:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Optional[int] = ConvNextVaModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def _UpperCamelCase ( ) -> List[Any]:
__UpperCAmelCase : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
__UpperCAmelCase : List[Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCamelCase )
__UpperCAmelCase : List[str] = self.default_image_processor
__UpperCAmelCase : Optional[Any] = prepare_img()
__UpperCAmelCase : int = preprocessor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
__UpperCAmelCase : str = model(**__lowerCamelCase )
# verify the logits
__UpperCAmelCase : Dict = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
__UpperCAmelCase : str = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
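# A minimal standalone version of the integration test above, usable as a smoke
# check outside the test harness. The checkpoint name and image path are taken
# from the test; the class name assumes the upstream spelling
# ConvNextV2ForImageClassification (this file spells it ConvNextVaForImageClassification).
import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # expected shape: (1, 1000)
print(model.config.id2label[int(logits.argmax(-1))])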
| 342 | 0 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=None ) -> int:
assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
__UpperCAmelCase : Dict = nn.Parameter(SCREAMING_SNAKE_CASE__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
__UpperCAmelCase : Dict = nn.Parameter(SCREAMING_SNAKE_CASE__ )
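# What the helper above is doing, written out with explicit names (a sketch;
# the original assigns the tensors into torch_layer.weight / torch_layer.bias
# after the shape checks):
def set_param_sketch(torch_layer, weight, bias=None):
    assert torch_layer.weight.shape == weight.shape
    torch_layer.weight = nn.Parameter(weight)  # replace the parameter in place
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape
        torch_layer.bias = nn.Parameter(bias)

layer = nn.Linear(4, 4)
set_param_sketch(layer, torch.zeros(4, 4), torch.zeros(4))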
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> str:
__UpperCAmelCase : List[str] = np.asarray(weights[0] )
__UpperCAmelCase : str = np.asarray(weights[1] )
__UpperCAmelCase : int = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key, torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(1, 2 ).contiguous().view(-1, SCREAMING_SNAKE_CASE__ ), )
set_param(
torch_layer.self_attention.value, torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(1, 2 ).contiguous().view(-1, SCREAMING_SNAKE_CASE__ ), )
set_param(
torch_layer.output.dense, torch.tensor(SCREAMING_SNAKE_CASE__ ).view(-1, SCREAMING_SNAKE_CASE__ ).contiguous().transpose(0, 1 ), )
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Tuple:
__UpperCAmelCase : str = np.asarray(weights[0] )
__UpperCAmelCase : Optional[Any] = np.asarray(weights[1] )
__UpperCAmelCase : Dict = np.asarray(weights[2] )
__UpperCAmelCase : List[str] = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query, torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(1, 2 ).contiguous().view(-1, SCREAMING_SNAKE_CASE__ ), )
set_param(
torch_layer.self_attention.key, torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(1, 2 ).contiguous().view(-1, SCREAMING_SNAKE_CASE__ ), )
set_param(
torch_layer.self_attention.value, torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(1, 2 ).contiguous().view(-1, SCREAMING_SNAKE_CASE__ ), )
set_param(
torch_layer.output.dense, torch.tensor(SCREAMING_SNAKE_CASE__ ).view(-1, SCREAMING_SNAKE_CASE__ ).contiguous().transpose(0, 1 ), )
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Tuple:
__UpperCAmelCase : List[str] = weights[0][0][0]
__UpperCAmelCase : Tuple = np.asarray(layer_norm_a[0] )
__UpperCAmelCase : List[Any] = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm, torch.tensor(SCREAMING_SNAKE_CASE__ ), torch.tensor(SCREAMING_SNAKE_CASE__ ), )
# lsh weights + output
__UpperCAmelCase : Dict = weights[0][1]
if len(SCREAMING_SNAKE_CASE__ ) < 4:
set_layer_weights_in_torch_lsh(SCREAMING_SNAKE_CASE__, torch_block.attention, SCREAMING_SNAKE_CASE__ )
else:
set_layer_weights_in_torch_local(SCREAMING_SNAKE_CASE__, torch_block.attention, SCREAMING_SNAKE_CASE__ )
# intermediate weights
__UpperCAmelCase : Any = weights[2][0][1][2]
# Chunked Feed Forward
if len(SCREAMING_SNAKE_CASE__ ) == 4:
__UpperCAmelCase : Tuple = intermediate_weights[2]
# layernorm 2
__UpperCAmelCase : int = np.asarray(intermediate_weights[0][0] )
__UpperCAmelCase : Optional[int] = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm, torch.tensor(SCREAMING_SNAKE_CASE__ ), torch.tensor(SCREAMING_SNAKE_CASE__ ), )
# intermediate dense
__UpperCAmelCase : str = np.asarray(intermediate_weights[1][0] )
__UpperCAmelCase : Union[str, Any] = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense, torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(0, 1 ).contiguous(), torch.tensor(SCREAMING_SNAKE_CASE__ ), )
# intermediate out
__UpperCAmelCase : List[str] = np.asarray(intermediate_weights[4][0] )
__UpperCAmelCase : Dict = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense, torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(0, 1 ).contiguous(), torch.tensor(SCREAMING_SNAKE_CASE__ ), )
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> List[str]:
__UpperCAmelCase : Tuple = torch_model.reformer
# word embeds
__UpperCAmelCase : List[Any] = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings, torch.tensor(SCREAMING_SNAKE_CASE__ ), )
if isinstance(weights[3], SCREAMING_SNAKE_CASE__ ):
__UpperCAmelCase : Dict = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
__UpperCAmelCase : List[str] = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f'''{position_embeddings[emb_idx]} emb does not match'''
__UpperCAmelCase : Optional[int] = nn.Parameter(torch.tensor(SCREAMING_SNAKE_CASE__ ) )
__UpperCAmelCase : Optional[int] = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
SCREAMING_SNAKE_CASE__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
__UpperCAmelCase : List[Any] = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
# output layer norm
__UpperCAmelCase : Tuple = np.asarray(weights[7][0] )
__UpperCAmelCase : Dict = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm, torch.tensor(SCREAMING_SNAKE_CASE__ ), torch.tensor(SCREAMING_SNAKE_CASE__ ), )
# output embeddings
__UpperCAmelCase : Tuple = np.asarray(weights[9][0] )
__UpperCAmelCase : Optional[Any] = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder, torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(0, 1 ).contiguous(), torch.tensor(SCREAMING_SNAKE_CASE__ ), )
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> List[Any]:
__UpperCAmelCase : List[Any] = ReformerConfig.from_json_file(SCREAMING_SNAKE_CASE__ )
print(f'''Building PyTorch model from configuration: {config}''' )
__UpperCAmelCase : Optional[int] = ReformerModelWithLMHead(SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__, "rb" ) as f:
__UpperCAmelCase : Optional[int] = pickle.load(SCREAMING_SNAKE_CASE__ )["weights"]
set_model_weights_in_torch(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, config.hidden_size )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict(), SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_snake_case = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
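# Example invocation of the script above (the file name and the paths are
# placeholders, not taken from the source; the flags match the argparse setup):
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path ./reformer_weights.pkl \
#       --config_file ./reformer_config.json \
#       --pytorch_dump_path ./pytorch_model.bin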
| 367 | import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _snake_case ( _lowercase ):
lowerCamelCase__: str = "detr"
lowerCamelCase__: Dict = ["past_key_values"]
lowerCamelCase__: str = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self: List[str] , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Any=None , __lowerCamelCase: Dict=3 , __lowerCamelCase: str=1_00 , __lowerCamelCase: Union[str, Any]=6 , __lowerCamelCase: Union[str, Any]=20_48 , __lowerCamelCase: Dict=8 , __lowerCamelCase: Optional[int]=6 , __lowerCamelCase: List[Any]=20_48 , __lowerCamelCase: int=8 , __lowerCamelCase: Tuple=0.0 , __lowerCamelCase: Dict=0.0 , __lowerCamelCase: Any=True , __lowerCamelCase: Tuple="relu" , __lowerCamelCase: Tuple=2_56 , __lowerCamelCase: Dict=0.1 , __lowerCamelCase: Union[str, Any]=0.0 , __lowerCamelCase: Optional[int]=0.0 , __lowerCamelCase: Union[str, Any]=0.02 , __lowerCamelCase: str=1.0 , __lowerCamelCase: List[str]=False , __lowerCamelCase: Dict="sine" , __lowerCamelCase: Optional[int]="resnet50" , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: int=False , __lowerCamelCase: Union[str, Any]=1 , __lowerCamelCase: Tuple=5 , __lowerCamelCase: int=2 , __lowerCamelCase: Dict=1 , __lowerCamelCase: Dict=1 , __lowerCamelCase: Union[str, Any]=5 , __lowerCamelCase: Dict=2 , __lowerCamelCase: int=0.1 , **__lowerCamelCase: str , ) -> int:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__UpperCAmelCase : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : List[Any] = backbone_config.get("model_type" )
__UpperCAmelCase : List[str] = CONFIG_MAPPING[backbone_model_type]
__UpperCAmelCase : List[str] = config_class.from_dict(__lowerCamelCase )
# set timm attributes to None
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = None, None, None
__UpperCAmelCase : Any = use_timm_backbone
__UpperCAmelCase : Optional[Any] = backbone_config
__UpperCAmelCase : Optional[Any] = num_channels
__UpperCAmelCase : List[Any] = num_queries
__UpperCAmelCase : Optional[int] = d_model
__UpperCAmelCase : Optional[Any] = encoder_ffn_dim
__UpperCAmelCase : Dict = encoder_layers
__UpperCAmelCase : List[Any] = encoder_attention_heads
__UpperCAmelCase : int = decoder_ffn_dim
__UpperCAmelCase : Tuple = decoder_layers
__UpperCAmelCase : int = decoder_attention_heads
__UpperCAmelCase : List[Any] = dropout
__UpperCAmelCase : Dict = attention_dropout
__UpperCAmelCase : Optional[Any] = activation_dropout
__UpperCAmelCase : int = activation_function
__UpperCAmelCase : Any = init_std
__UpperCAmelCase : str = init_xavier_std
__UpperCAmelCase : int = encoder_layerdrop
__UpperCAmelCase : Tuple = decoder_layerdrop
__UpperCAmelCase : List[Any] = encoder_layers
__UpperCAmelCase : Optional[Any] = auxiliary_loss
__UpperCAmelCase : int = position_embedding_type
__UpperCAmelCase : Optional[int] = backbone
__UpperCAmelCase : str = use_pretrained_backbone
__UpperCAmelCase : Dict = dilation
# Hungarian matcher
__UpperCAmelCase : Optional[int] = class_cost
__UpperCAmelCase : Optional[Any] = bbox_cost
__UpperCAmelCase : Optional[int] = giou_cost
# Loss coefficients
__UpperCAmelCase : Any = mask_loss_coefficient
__UpperCAmelCase : Any = dice_loss_coefficient
__UpperCAmelCase : Any = bbox_loss_coefficient
__UpperCAmelCase : Optional[int] = giou_loss_coefficient
__UpperCAmelCase : Optional[Any] = eos_coefficient
super().__init__(is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase )
@property
def _lowerCamelCase ( self: Dict ) -> int:
return self.encoder_attention_heads
@property
def _lowerCamelCase ( self: str ) -> int:
return self.d_model
@classmethod
def _lowerCamelCase ( cls: Optional[int] , __lowerCamelCase: PretrainedConfig , **__lowerCamelCase: List[Any] ) -> List[Any]:
return cls(backbone_config=__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( self: str ) -> Dict[str, any]:
__UpperCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
__UpperCAmelCase : int = self.backbone_config.to_dict()
__UpperCAmelCase : List[str] = self.__class__.model_type
return output
class _snake_case ( _lowercase ):
lowerCamelCase__: Optional[int] = version.parse("1.11" )
@property
def _lowerCamelCase ( self: Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _lowerCamelCase ( self: Optional[Any] ) -> float:
return 1e-5
@property
def _lowerCamelCase ( self: List[str] ) -> int:
return 12
| 342 | 0 |
from sklearn.metrics import fa_score
import datasets
_snake_case = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
_snake_case = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_snake_case = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def _lowerCamelCase ( self: int ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , )
def _lowerCamelCase ( self: Any , __lowerCamelCase: Tuple , __lowerCamelCase: Tuple , __lowerCamelCase: List[Any]=None , __lowerCamelCase: Optional[int]=1 , __lowerCamelCase: str="binary" , __lowerCamelCase: Tuple=None ) -> Union[str, Any]:
__UpperCAmelCase : Any = fa_score(
__lowerCamelCase , __lowerCamelCase , labels=__lowerCamelCase , pos_label=__lowerCamelCase , average=__lowerCamelCase , sample_weight=__lowerCamelCase )
return {"f1": float(__lowerCamelCase ) if score.size == 1 else score}
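# Hand computation matching the formula in the description above, using the data
# from Example 1 (references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0]):
tp, fp, fn = 1, 1, 1  # TP at index 3, FP at index 2, FN at index 1
precision = tp / (tp + fp)  # 0.5
recall = tp / (tp + fn)  # 0.5
print(2 * (precision * recall) / (precision + recall))  # 0.5, as in the docstring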
| 368 | from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=1e-1_2 ) -> str:
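# L2-normalize the rows of both embedding matrices, then take all pairwise dot products (i.e. cosine similarity)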
__UpperCAmelCase : Any = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(snake_case__, axis=1 ), a_min=snake_case__ ) ).T
__UpperCAmelCase : int = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(snake_case__, axis=1 ), a_min=snake_case__ ) ).T
return jnp.matmul(snake_case__, norm_emb_a.T )
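# NumPy rendering of the helper above (despite "distance" in the name it returns
# cosine similarity): L2-normalize each row, then form all pairwise dot products.
import numpy as np

def cosine_similarity_sketch(emb_a, emb_b, eps=1e-12):
    norm_a = emb_a / np.clip(np.linalg.norm(emb_a, axis=1, keepdims=True), eps, None)
    norm_b = emb_b / np.clip(np.linalg.norm(emb_b, axis=1, keepdims=True), eps, None)
    return norm_a @ norm_b.T

a = np.array([[1.0, 0.0], [0.0, 2.0]])
b = np.array([[1.0, 1.0]])
print(cosine_similarity_sketch(a, b))  # both rows at 45 degrees to b -> ~[[0.7071], [0.7071]]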
class _snake_case ( nn.Module ):
lowerCamelCase__: CLIPConfig
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: Any ) -> Tuple:
__UpperCAmelCase : List[str] = FlaxCLIPVisionModule(self.config.vision_config )
__UpperCAmelCase : Any = nn.Dense(self.config.projection_dim , use_bias=__lowerCamelCase , dtype=self.dtype )
__UpperCAmelCase : int = self.param("concept_embeds" , jax.nn.initializers.ones , (17, self.config.projection_dim) )
__UpperCAmelCase : int = self.param(
"special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim) )
__UpperCAmelCase : Tuple = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (17,) )
__UpperCAmelCase : str = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,) )
def __call__( self: List[Any] , __lowerCamelCase: Dict ) -> Dict:
__UpperCAmelCase : Optional[int] = self.vision_model(__lowerCamelCase )[1]
__UpperCAmelCase : List[str] = self.visual_projection(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = jax_cosine_distance(__lowerCamelCase , self.special_care_embeds )
__UpperCAmelCase : Optional[Any] = jax_cosine_distance(__lowerCamelCase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
__UpperCAmelCase : List[str] = 0.0
__UpperCAmelCase : Tuple = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
__UpperCAmelCase : List[str] = jnp.round(__lowerCamelCase , 3 )
__UpperCAmelCase : Any = jnp.any(special_scores > 0 , axis=1 , keepdims=__lowerCamelCase )
# Use a lower threshold if an image has any special care concept
__UpperCAmelCase : List[Any] = is_special_care * 0.01
__UpperCAmelCase : Any = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
__UpperCAmelCase : List[str] = jnp.round(__lowerCamelCase , 3 )
__UpperCAmelCase : Any = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class _snake_case ( _lowercase ):
lowerCamelCase__: int = CLIPConfig
lowerCamelCase__: Tuple = "clip_input"
lowerCamelCase__: str = FlaxStableDiffusionSafetyCheckerModule
def __init__( self: Union[str, Any] , __lowerCamelCase: CLIPConfig , __lowerCamelCase: Optional[Tuple] = None , __lowerCamelCase: int = 0 , __lowerCamelCase: jnp.dtype = jnp.floataa , __lowerCamelCase: bool = True , **__lowerCamelCase: Optional[int] , ) -> int:
if input_shape is None:
__UpperCAmelCase : Dict = (1, 2_24, 2_24, 3)
__UpperCAmelCase : Tuple = self.module_class(config=__lowerCamelCase , dtype=__lowerCamelCase , **__lowerCamelCase )
super().__init__(__lowerCamelCase , __lowerCamelCase , input_shape=__lowerCamelCase , seed=__lowerCamelCase , dtype=__lowerCamelCase , _do_init=_do_init )
def _lowerCamelCase ( self: Dict , __lowerCamelCase: jax.random.KeyArray , __lowerCamelCase: Tuple , __lowerCamelCase: FrozenDict = None ) -> FrozenDict:
# init input tensor
__UpperCAmelCase : Tuple = jax.random.normal(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase , __UpperCAmelCase : Dict = jax.random.split(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = {"params": params_rng, "dropout": dropout_rng}
__UpperCAmelCase : str = self.module.init(__lowerCamelCase , __lowerCamelCase )["params"]
return random_params
def __call__( self: Union[str, Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: dict = None , ) -> List[Any]:
__UpperCAmelCase : int = jnp.transpose(__lowerCamelCase , (0, 2, 3, 1) )
return self.module.apply(
{"params": params or self.params} , jnp.array(__lowerCamelCase , dtype=jnp.floataa ) , rngs={} , )
| 342 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_snake_case = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''BartphoTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 369 | import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def _UpperCamelCase ( snake_case__ ) -> Tuple:
__UpperCAmelCase : Union[str, Any] = 384
if "tiny" in model_name:
__UpperCAmelCase : Union[str, Any] = [3, 3, 9, 3]
__UpperCAmelCase : List[Any] = [96, 192, 384, 768]
if "small" in model_name:
__UpperCAmelCase : Tuple = [3, 3, 27, 3]
__UpperCAmelCase : Any = [96, 192, 384, 768]
if "base" in model_name:
__UpperCAmelCase : str = [3, 3, 27, 3]
__UpperCAmelCase : str = [128, 256, 512, 1024]
__UpperCAmelCase : str = 512
if "large" in model_name:
__UpperCAmelCase : Dict = [3, 3, 27, 3]
__UpperCAmelCase : int = [192, 384, 768, 1536]
__UpperCAmelCase : Dict = 768
if "xlarge" in model_name:
__UpperCAmelCase : List[Any] = [3, 3, 27, 3]
__UpperCAmelCase : Tuple = [256, 512, 1024, 2048]
__UpperCAmelCase : int = 1024
# set label information
__UpperCAmelCase : List[Any] = 150
__UpperCAmelCase : str = "huggingface/label-files"
__UpperCAmelCase : List[Any] = "ade20k-id2label.json"
__UpperCAmelCase : str = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) )
__UpperCAmelCase : str = {int(snake_case__ ): v for k, v in idalabel.items()}
__UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()}
__UpperCAmelCase : int = ConvNextConfig(
depths=snake_case__, hidden_sizes=snake_case__, out_features=["stage1", "stage2", "stage3", "stage4"] )
__UpperCAmelCase : int = UperNetConfig(
backbone_config=snake_case__, auxiliary_in_channels=snake_case__, num_labels=snake_case__, idalabel=snake_case__, labelaid=snake_case__, )
return config
def _UpperCamelCase ( snake_case__ ) -> Tuple:
__UpperCAmelCase : Optional[int] = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Any:
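# move the value stored under the old key to the new key, in place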
__UpperCAmelCase : Union[str, Any] = dct.pop(snake_case__ )
__UpperCAmelCase : Optional[int] = val
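# Toy demonstration of the pop-and-reinsert renaming performed by the helper
# above (keys mirror one of the patterns built earlier; values are made up):
demo_state_dict = {"backbone.norm0.weight": torch.ones(3)}
demo_renames = [("backbone.norm0.weight", "backbone.hidden_states_norms.stage1.weight")]
for src, dest in demo_renames:
    demo_state_dict[dest] = demo_state_dict.pop(src)
print(list(demo_state_dict))  # ['backbone.hidden_states_norms.stage1.weight']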
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Union[str, Any]:
__UpperCAmelCase : Dict = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
__UpperCAmelCase : Union[str, Any] = model_name_to_url[model_name]
__UpperCAmelCase : str = torch.hub.load_state_dict_from_url(snake_case__, map_location="cpu" )["state_dict"]
__UpperCAmelCase : Dict = get_upernet_config(snake_case__ )
__UpperCAmelCase : str = UperNetForSemanticSegmentation(snake_case__ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
__UpperCAmelCase : str = state_dict.pop(snake_case__ )
if "bn" in key:
__UpperCAmelCase : int = key.replace("bn", "batch_norm" )
__UpperCAmelCase : Union[str, Any] = val
# rename keys
__UpperCAmelCase : Optional[Any] = create_rename_keys(snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__, snake_case__, snake_case__ )
model.load_state_dict(snake_case__ )
# verify on image
__UpperCAmelCase : int = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
__UpperCAmelCase : Optional[int] = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw ).convert("RGB" )
__UpperCAmelCase : str = SegformerImageProcessor()
__UpperCAmelCase : Any = processor(snake_case__, return_tensors="pt" ).pixel_values
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(snake_case__ )
if model_name == "upernet-convnext-tiny":
__UpperCAmelCase : Any = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
__UpperCAmelCase : Optional[Any] = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
__UpperCAmelCase : Dict = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
__UpperCAmelCase : Tuple = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
__UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print("Logits:", outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3], snake_case__, atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case__ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(snake_case__ )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[F'upernet-convnext-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_snake_case = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 342 | 0 |
import socket
def _UpperCamelCase ( ) -> Tuple:
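# open a TCP connection to a local file server and stream its response to disk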
__UpperCAmelCase : Optional[int] = socket.socket(socket.AF_INET, socket.SOCK_STREAM )
__UpperCAmelCase : Union[str, Any] = socket.gethostname()
__UpperCAmelCase : Tuple = 1_2312
sock.connect((host, port) )
sock.send(B"Hello server!" )
with open("Received_file", "wb" ) as out_file:
print("File opened" )
print("Receiving data..." )
while True:
__UpperCAmelCase : int = sock.recv(1024 )
if not data:
break
out_file.write(_lowerCamelCase )
print("Successfully received the file" )
sock.close()
print("Connection closed" )
if __name__ == "__main__":
main()
| 370 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
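# RoCBert augments BERT with pronunciation and shape embeddings so the model stays robust to
# adversarial character substitutions in Chinese text; the extra vocab/embedding sizes below
# configure those two auxiliary input channels.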
class RocBertConfig(PretrainedConfig):
    model_type = "roc_bert"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 342 | 0 |
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
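# These tests assert that the deprecated metric entry points still work while warning users to
# migrate to the `evaluate` library; the hub client is mocked so no network access is needed.
# (The warning class is assumed to be FutureWarning below, matching the URL in the message.)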
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id
    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]
        def list_metrics(self):
            return self._metrics
    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())
@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 371 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
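# Builds a MobileViTConfig for one of the original Apple ml-cvnets checkpoints: the three model
# sizes differ in their hidden/neck channel widths, and the deeplabv3_* variants swap the
# ImageNet-1k classifier for a 21-class PASCAL VOC segmentation head.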
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()
    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0
    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
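# Translates a parameter name from the original checkpoint into the transformers naming scheme
# through a cascade of string substitutions applied in order.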
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f'''layer_{i}.''' in name:
            name = name.replace(f'''layer_{i}.''', f'''encoder.layer.{i - 1}.''')
    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")
    for i in range(0, 2):
        for j in range(0, 4):
            if f'''.{i}.{j}.''' in name:
                name = name.replace(f'''.{i}.{j}.''', f'''.{i}.layer.{j}.''')
    for i in range(2, 6):
        for j in range(0, 4):
            if f'''.{i}.{j}.''' in name:
                name = name.replace(f'''.{i}.{j}.''', f'''.{i}.''')
    if "expand_1x1" in name:
        name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
    if "conv_3x3" in name:
        name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
    if "reduce_1x1" in name:
        name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")
    for i in range(2, 5):
        if f'''.global_rep.{i}.weight''' in name:
            name = name.replace(f'''.global_rep.{i}.weight''', ".layernorm.weight")
        if f'''.global_rep.{i}.bias''' in name:
            name = name.replace(f'''.global_rep.{i}.bias''', ".layernorm.bias")
    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")
    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")
    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name
    return name
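# The original checkpoints store attention query/key/value as a single fused "qkv" tensor; the
# helper below slices it into separate q/k/v weights and biases using the layer's head size.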
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f'''{model_prefix}encoder.layer.{layer_num}''')
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'''
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val
    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilevit_config(mobilevit_name)
    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")
    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits
if mobilevit_name.startswith("deeplabv3_" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
__UpperCAmelCase : int = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
__UpperCAmelCase : Tuple = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
__UpperCAmelCase : Any = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
] )
else:
raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3, :3, :3], snake_case__, atol=1e-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
__UpperCAmelCase : str = torch.tensor([-0.9866, 0.2392, -1.1241] )
elif mobilevit_name == "mobilevit_xs":
__UpperCAmelCase : Tuple = torch.tensor([-2.4761, -0.9399, -1.9587] )
elif mobilevit_name == "mobilevit_xxs":
__UpperCAmelCase : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653] )
else:
raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3], snake_case__, atol=1e-4 )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 342 | 0 |
from jiwer import compute_measures
import datasets
_snake_case = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
_snake_case = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
_snake_case = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
| 350 | import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
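# Complementary counting: the probability that a given colour is absent from the 20 drawn balls
# is C(70 - 10, 20) / C(70, 20), so by linearity of expectation the expected number of distinct
# colours is 7 * (1 - P(absent)).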
def solution(taken: int = 20) -> str:
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
| 342 | 0 |
def solution(limit=28123):
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        if not any((n - a in abundants) for a in abundants):
            res += n
    return res
if __name__ == "__main__":
print(solution())
| 351 | def longest_distance(graph):
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)
    print(max(long_dist))
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 342 | 0 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])
    return image, map
def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)
    def test_batch_feature(self):
        pass
def _lowerCamelCase ( self: Optional[int] ) -> List[str]:
# Initialize image_processing
__UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
__UpperCAmelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__UpperCAmelCase : Any = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowerCamelCase ( self: List[str] ) -> Dict:
# Initialize image_processing
__UpperCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
__UpperCAmelCase : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__UpperCAmelCase : List[Any] = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowerCamelCase ( self: str ) -> Optional[Any]:
# Initialize image_processing
__UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
__UpperCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__UpperCAmelCase : Optional[Any] = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowerCamelCase ( self: int ) -> Union[str, Any]:
# Initialize image_processing
__UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
__UpperCAmelCase : Optional[int] = []
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
__UpperCAmelCase : Any = image_processing(image_inputs[0] , maps[0] , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
# Test batched
__UpperCAmelCase : List[str] = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
# Test not batched input (PIL images)
__UpperCAmelCase : Any = prepare_semantic_single_inputs()
__UpperCAmelCase : int = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
# Test batched input (PIL images)
__UpperCAmelCase : int = prepare_semantic_batch_inputs()
__UpperCAmelCase : str = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
2,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
# Initialize image_processing
__UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
__UpperCAmelCase : str = prepare_semantic_single_inputs()
__UpperCAmelCase : List[str] = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 1_50 )
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : List[str] = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
| 352 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
    "feature_extraction_whisper": ["WhisperFeatureExtractor"],
    "processing_whisper": ["WhisperProcessor"],
    "tokenization_whisper": ["WhisperTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
        "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WhisperForConditionalGeneration",
        "WhisperModel",
        "WhisperPreTrainedModel",
        "WhisperForAudioClassification",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
        "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWhisperForConditionalGeneration",
        "TFWhisperModel",
        "TFWhisperPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
        "FlaxWhisperForConditionalGeneration",
        "FlaxWhisperModel",
        "FlaxWhisperPreTrainedModel",
        "FlaxWhisperForAudioClassification",
    ]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 342 | 0 |
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 353 | from __future__ import annotations
from math import pi
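# Inductive reactance follows X_L = 2 * pi * f * L; given a zero for exactly one of
# (inductance, frequency, reactance), the function below solves for that missing quantity.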
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 342 | 0 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
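# MIN_NUM_TOKENS skips files too short to fingerprint meaningfully; NUM_PERM is the number of
# MinHash permutations (more permutations give tighter Jaccard estimates at higher hashing cost).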
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code: str) -> Set[str]:
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)
    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f'''Duplicate key {code_key}''')
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)
    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters
    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=10000), chunksize=100, ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator, jaccard_threshold):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None
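# The dataset is shared with worker processes through this module-level global rather than as a
# function argument, so multiprocessing does not have to pickle a copy for every task.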
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f, cluster_list, ), total=len(cluster_list), ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset, jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f'''Original dataset size: {len(dataset)}''')
    print(f'''Number of duplicate clusters: {len(duplicate_clusters)}''')
    print(f'''Files in duplicate cluster: {len(duplicate_indices)}''')
    print(f'''Unique files in duplicate cluster: {len(extreme_dict)}''')
    print(f'''Filtered dataset size: {len(ds_filter)}''')
    return ds_filter, duplicate_clusters
| 354 | import flax.linen as nn
import jax
import jax.numpy as jnp
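# Flax/linen building blocks in NHWC layout: nearest-neighbour upsampling, strided-convolution
# downsampling, and a time-conditioned residual block of the kind used in diffusion UNets.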
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states, shape=(batch, height * 2, width * 2, channels), method="nearest", )
        hidden_states = self.conv(hidden_states)
        return hidden_states
class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states
class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels
        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)
        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype, )
    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb
        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)
        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)
        return hidden_states + residual
| 342 | 0 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
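# width_coefficient and depth_coefficient implement EfficientNet's compound scaling: they
# uniformly scale the per-stage channel counts and block repeat counts defined below.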
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"
    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )
    @property
    def atol_for_validation(self) -> float:
        return 1e-5
| 355 | import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_snake_case = pytest.mark.integration
@require_faiss
class _snake_case ( _lowercase ):
    def _create_dummy_dataset(self):
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
return dset
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
__UpperCAmelCase : int = dset.map(
lambda __lowerCamelCase , __lowerCamelCase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__lowerCamelCase , keep_in_memory=__lowerCamelCase )
__UpperCAmelCase : Tuple = dset.add_faiss_index("vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT )
__UpperCAmelCase , __UpperCAmelCase : Dict = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def _lowerCamelCase ( self: List[str] ) -> int:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__UpperCAmelCase , __UpperCAmelCase : Tuple = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
__UpperCAmelCase , __UpperCAmelCase : List[Any] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(__lowerCamelCase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def _lowerCamelCase ( self: List[str] ) -> Dict:
from elasticsearch import Elasticsearch
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
__UpperCAmelCase : int = {"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 30 )
__UpperCAmelCase : Dict = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
__UpperCAmelCase : Any = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=__lowerCamelCase )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: List[str] ) -> Optional[int]:
import faiss
__UpperCAmelCase : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
__UpperCAmelCase : Dict = np.zeros(5 , dtype=np.floataa )
__UpperCAmelCase : List[str] = 1
__UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(__lowerCamelCase )
self.assertRaises(__lowerCamelCase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
__UpperCAmelCase : List[str] = np.eye(5 , dtype=np.floataa )[::-1]
__UpperCAmelCase , __UpperCAmelCase : Any = index.search_batch(__lowerCamelCase )
self.assertRaises(__lowerCamelCase , index.search_batch , queries[0] )
__UpperCAmelCase : Dict = [scores[0] for scores in total_scores]
__UpperCAmelCase : int = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , __lowerCamelCase )
def _lowerCamelCase ( self: Any ) -> List[str]:
import faiss
__UpperCAmelCase : Dict = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
__UpperCAmelCase : Optional[Any] = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__lowerCamelCase ):
__UpperCAmelCase : Any = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def _lowerCamelCase ( self: List[str] ) -> Dict:
import faiss
__UpperCAmelCase : str = faiss.IndexFlat(5 )
__UpperCAmelCase : int = FaissIndex(custom_index=__lowerCamelCase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def _lowerCamelCase ( self: Union[str, Any] ) -> int:
import faiss
__UpperCAmelCase : Any = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file:
index.save(tmp_file.name )
__UpperCAmelCase : List[str] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
__UpperCAmelCase : Tuple = np.zeros(5 , dtype=np.floataa )
__UpperCAmelCase : Tuple = 1
__UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search(__lowerCamelCase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs(mockfs):
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))
    index_name = "index.faiss"
    path = f'''mock://{index_name}'''
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)
    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
__UpperCAmelCase : Optional[Any] = Elasticsearch()
__UpperCAmelCase : Dict = {"acknowledged": True}
__UpperCAmelCase : Any = ElasticSearchIndex(es_client=__lowerCamelCase )
mocked_bulk.return_value = [(True, None)] * 3  # streaming_bulk yields one (ok, info) tuple per document
index.add_documents(["foo", "bar", "foobar"] )
# single query
__UpperCAmelCase : Dict = "foo"
__UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = index.search(__lowerCamelCase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
__UpperCAmelCase : int = "foo"
__UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search(__lowerCamelCase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
__UpperCAmelCase : int = ["foo", "bar", "foobar"]
__UpperCAmelCase : Union[str, Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
__UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search_batch(__lowerCamelCase )
__UpperCAmelCase : Tuple = [scores[0] for scores in total_scores]
__UpperCAmelCase : Optional[int] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __lowerCamelCase )
# batched queries with timeout
__UpperCAmelCase : str = ["foo", "bar", "foobar"]
__UpperCAmelCase : Tuple = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search_batch(__lowerCamelCase , request_timeout=30 )
__UpperCAmelCase : Union[str, Any] = [scores[0] for scores in total_scores]
__UpperCAmelCase : List[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __lowerCamelCase )
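# Self-contained sketch of the mocking pattern used in the class above: the tests
# never talk to a live cluster because the client's search() return value is
# scripted. fetch_top_score is a hypothetical helper, not part of the library.
from unittest.mock import MagicMock

def fetch_top_score(es_client, query):
    response = es_client.search(index="docs", body={"query": {"match": {"text": query}}})
    return response["hits"]["hits"][0]["_score"]

es_client = MagicMock()
es_client.search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
assert fetch_top_score(es_client, "foo") == 1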
| 342 | 0 |
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
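# The import block above is a backward-compatibility shim: the symbols now live in
# .utils, and re-importing them here keeps legacy `from ...file_utils import X`
# statements working. A hedged sketch of the same pattern for any refactored
# module (file and symbol names below are illustrative, not from the library):
#
#   new_home.py
#       WEIGHTS_NAME = "pytorch_model.bin"
#
#   old_home.py  (kept only so legacy imports still resolve)
#       from new_home import WEIGHTS_NAME  # noqa: F401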
| 356 | import argparse
import struct
import unittest
class _snake_case :
def __init__( self: Tuple , __lowerCamelCase: bytes ) -> None:
__UpperCAmelCase : Tuple = data
# Initialize hash values
__UpperCAmelCase : Any = [
0x6_A_0_9_E_6_6_7,
0xB_B_6_7_A_E_8_5,
0x3_C_6_E_F_3_7_2,
0xA_5_4_F_F_5_3_A,
0x5_1_0_E_5_2_7_F,
0x9_B_0_5_6_8_8_C,
0x1_F_8_3_D_9_A_B,
0x5_B_E_0_C_D_1_9,
]
# Initialize round constants
__UpperCAmelCase : Dict = [
0x4_2_8_A_2_F_9_8,
0x7_1_3_7_4_4_9_1,
0xB_5_C_0_F_B_C_F,
0xE_9_B_5_D_B_A_5,
0x3_9_5_6_C_2_5_B,
0x5_9_F_1_1_1_F_1,
0x9_2_3_F_8_2_A_4,
0xA_B_1_C_5_E_D_5,
0xD_8_0_7_A_A_9_8,
0x1_2_8_3_5_B_0_1,
0x2_4_3_1_8_5_B_E,
0x5_5_0_C_7_D_C_3,
0x7_2_B_E_5_D_7_4,
0x8_0_D_E_B_1_F_E,
0x9_B_D_C_0_6_A_7,
0xC_1_9_B_F_1_7_4,
0xE_4_9_B_6_9_C_1,
0xE_F_B_E_4_7_8_6,
0x0_F_C_1_9_D_C_6,
0x2_4_0_C_A_1_C_C,
0x2_D_E_9_2_C_6_F,
0x4_A_7_4_8_4_A_A,
0x5_C_B_0_A_9_D_C,
0x7_6_F_9_8_8_D_A,
0x9_8_3_E_5_1_5_2,
0xA_8_3_1_C_6_6_D,
0xB_0_0_3_2_7_C_8,
0xB_F_5_9_7_F_C_7,
0xC_6_E_0_0_B_F_3,
0xD_5_A_7_9_1_4_7,
0x0_6_C_A_6_3_5_1,
0x1_4_2_9_2_9_6_7,
0x2_7_B_7_0_A_8_5,
0x2_E_1_B_2_1_3_8,
0x4_D_2_C_6_D_F_C,
0x5_3_3_8_0_D_1_3,
0x6_5_0_A_7_3_5_4,
0x7_6_6_A_0_A_B_B,
0x8_1_C_2_C_9_2_E,
0x9_2_7_2_2_C_8_5,
0xA_2_B_F_E_8_A_1,
0xA_8_1_A_6_6_4_B,
0xC_2_4_B_8_B_7_0,
0xC_7_6_C_5_1_A_3,
0xD_1_9_2_E_8_1_9,
0xD_6_9_9_0_6_2_4,
0xF_4_0_E_3_5_8_5,
0x1_0_6_A_A_0_7_0,
0x1_9_A_4_C_1_1_6,
0x1_E_3_7_6_C_0_8,
0x2_7_4_8_7_7_4_C,
0x3_4_B_0_B_C_B_5,
0x3_9_1_C_0_C_B_3,
0x4_E_D_8_A_A_4_A,
0x5_B_9_C_C_A_4_F,
0x6_8_2_E_6_F_F_3,
0x7_4_8_F_8_2_E_E,
0x7_8_A_5_6_3_6_F,
0x8_4_C_8_7_8_1_4,
0x8_C_C_7_0_2_0_8,
0x9_0_B_E_F_F_F_A,
0xA_4_5_0_6_C_E_B,
0xB_E_F_9_A_3_F_7,
0xC_6_7_1_7_8_F_2,
]
__UpperCAmelCase : List[Any] = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _lowerCamelCase ( __lowerCamelCase: bytes ) -> bytes:
__UpperCAmelCase : List[str] = B"\x80" + (B"\x00" * (63 - (len(__lowerCamelCase ) + 8) % 64))
__UpperCAmelCase : int = struct.pack(">Q" , (len(__lowerCamelCase ) * 8) )
return data + padding + big_endian_integer
def _lowerCamelCase ( self: Dict ) -> None:
# Convert into blocks of 64 bytes
__UpperCAmelCase : Dict = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
__UpperCAmelCase : List[str] = list(struct.unpack(">16L" , __lowerCamelCase ) )
# extend to the full 64-entry message schedule with 48 zeroed words
words += [0] * 48
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
__UpperCAmelCase : Union[str, Any] = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
__UpperCAmelCase : str = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
__UpperCAmelCase : Union[str, Any] = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_0_0_0_0_0_0_0_0
# Compression
__UpperCAmelCase : Union[str, Any] = self.ror(__lowerCamelCase , 6 ) ^ self.ror(__lowerCamelCase , 11 ) ^ self.ror(__lowerCamelCase , 25 )
__UpperCAmelCase : Tuple = (e & f) ^ ((~e & 0xF_F_F_F_F_F_F_F) & g)
__UpperCAmelCase : int = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_0_0_0_0_0_0_0_0
__UpperCAmelCase : List[Any] = self.ror(__lowerCamelCase , 2 ) ^ self.ror(__lowerCamelCase , 13 ) ^ self.ror(__lowerCamelCase , 22 )
__UpperCAmelCase : Dict = (a & b) ^ (a & c) ^ (b & c)
__UpperCAmelCase : int = (sa + maj) % 0x1_0_0_0_0_0_0_0_0
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = (
g,
f,
e,
((d + tempa) % 0x1_0_0_0_0_0_0_0_0),  # e = (d + temp1) mod 2**32
c,
b,
a,
((tempa + tempa) % 0x1_0_0_0_0_0_0_0_0),  # standard SHA-256 computes a = (temp1 + temp2) mod 2**32; both temporaries are named tempa here
)
__UpperCAmelCase : Optional[int] = [a, b, c, d, e, f, g, h]
# Modify final values
__UpperCAmelCase : List[str] = [
((element + mutated_hash_values[index]) % 0x1_0_0_0_0_0_0_0_0)
for index, element in enumerate(self.hashes )
]
__UpperCAmelCase : int = "".join([hex(__lowerCamelCase )[2:].zfill(8 ) for value in self.hashes] )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: int ) -> int:
return 0xF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations)
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: List[Any] ) -> None:
import hashlib
__UpperCAmelCase : Dict = bytes("Test String" , "utf-8" )
self.assertEqual(SHAaaa(__lowerCamelCase ).hash , hashlib.shaaaa(__lowerCamelCase ).hexdigest() )
def _UpperCamelCase ( ) -> None:
import doctest
doctest.testmod()
__UpperCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
parser.add_argument(
"-f", "--file", dest="input_file", help="Hash contents of a file" )
__UpperCAmelCase : List[Any] = parser.parse_args()
__UpperCAmelCase : Optional[int] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file, "rb" ) as f:
__UpperCAmelCase : List[str] = f.read()
else:
__UpperCAmelCase : List[Any] = bytes(snake_case__, "utf-8" )
print(SHAaaa(snake_case__ ).hash )
if __name__ == "__main__":
main()
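# Worked example of the preprocessing step implemented above: SHA-256 appends a
# single 0x80 byte, then zero padding, then the message length in bits as a
# big-endian 64-bit integer, so the padded length is always a multiple of 64 bytes.
import struct

data = b"abc"                                            # 3-byte message
padding = b"\x80" + b"\x00" * (63 - (len(data) + 8) % 64)
length = struct.pack(">Q", len(data) * 8)                # 24 bits, big-endian
padded = data + padding + length
assert len(padded) % 64 == 0 and len(padded) == 64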
| 342 | 0 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class _snake_case :
def __init__( self: Dict , __lowerCamelCase: Any , __lowerCamelCase: Dict=13 , __lowerCamelCase: List[str]=7 , __lowerCamelCase: Any=True , __lowerCamelCase: int=True , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: Dict=True , __lowerCamelCase: Optional[Any]=99 , __lowerCamelCase: Union[str, Any]=64 , __lowerCamelCase: Dict=32 , __lowerCamelCase: Optional[int]=5 , __lowerCamelCase: Any=4 , __lowerCamelCase: Tuple=37 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: List[str]=0.1 , __lowerCamelCase: Optional[int]=0.1 , __lowerCamelCase: Optional[int]=5_12 , __lowerCamelCase: Tuple=16 , __lowerCamelCase: int=2 , __lowerCamelCase: List[Any]=0.02 , __lowerCamelCase: List[str]=3 , __lowerCamelCase: Union[str, Any]=4 , __lowerCamelCase: Any=None , ) -> Tuple:
__UpperCAmelCase : Dict = parent
__UpperCAmelCase : int = batch_size
__UpperCAmelCase : Tuple = seq_length
__UpperCAmelCase : Optional[Any] = is_training
__UpperCAmelCase : Optional[Any] = use_input_mask
__UpperCAmelCase : Union[str, Any] = use_token_type_ids
__UpperCAmelCase : str = use_labels
__UpperCAmelCase : Optional[int] = vocab_size
__UpperCAmelCase : int = hidden_size
__UpperCAmelCase : Any = embedding_size
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : int = num_attention_heads
__UpperCAmelCase : Any = intermediate_size
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : str = hidden_dropout_prob
__UpperCAmelCase : Any = attention_probs_dropout_prob
__UpperCAmelCase : str = max_position_embeddings
__UpperCAmelCase : Optional[Any] = type_vocab_size
__UpperCAmelCase : int = type_sequence_label_size
__UpperCAmelCase : List[str] = initializer_range
__UpperCAmelCase : List[Any] = num_labels
__UpperCAmelCase : Optional[Any] = num_choices
__UpperCAmelCase : Tuple = scope
def _lowerCamelCase ( self: Tuple ) -> List[str]:
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : int = None
if self.use_input_mask:
__UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : Dict = None
if self.use_token_type_ids:
__UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : Any = None
__UpperCAmelCase : str = None
__UpperCAmelCase : Any = None
if self.use_labels:
__UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self: List[str] ) -> Optional[Any]:
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: Any , __lowerCamelCase: str , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: List[Any] , __lowerCamelCase: int , __lowerCamelCase: Dict ) -> int:
__UpperCAmelCase : str = MegatronBertModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__UpperCAmelCase : Dict = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ )
__UpperCAmelCase : List[Any] = model(lowerCamelCase__ , token_type_ids=lowerCamelCase__ )
__UpperCAmelCase : Dict = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowerCamelCase ( self: Any , __lowerCamelCase: List[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: int , __lowerCamelCase: str , __lowerCamelCase: Optional[Any] ) -> List[str]:
__UpperCAmelCase : Dict = MegatronBertForMaskedLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__UpperCAmelCase : List[Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: int , __lowerCamelCase: List[str] , __lowerCamelCase: Any , __lowerCamelCase: Tuple , __lowerCamelCase: str , __lowerCamelCase: str , __lowerCamelCase: Optional[int] ) -> List[Any]:
__UpperCAmelCase : str = MegatronBertForCausalLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__UpperCAmelCase : List[Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Tuple , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: List[Any] , __lowerCamelCase: Dict , __lowerCamelCase: List[str] ) -> Optional[Any]:
__UpperCAmelCase : int = MegatronBertForNextSentencePrediction(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__UpperCAmelCase : str = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: str , __lowerCamelCase: str , __lowerCamelCase: str , __lowerCamelCase: int , __lowerCamelCase: str , __lowerCamelCase: Optional[Any] ) -> int:
__UpperCAmelCase : Optional[Any] = MegatronBertForPreTraining(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__UpperCAmelCase : str = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , next_sentence_label=lowerCamelCase__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: List[str] , __lowerCamelCase: List[Any] , __lowerCamelCase: List[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict , __lowerCamelCase: List[Any] ) -> List[Any]:
__UpperCAmelCase : Optional[Any] = MegatronBertForQuestionAnswering(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__UpperCAmelCase : List[Any] = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: str , __lowerCamelCase: Any , __lowerCamelCase: int , __lowerCamelCase: Tuple , __lowerCamelCase: str , __lowerCamelCase: Tuple , __lowerCamelCase: str ) -> Union[str, Any]:
__UpperCAmelCase : List[Any] = self.num_labels
__UpperCAmelCase : Any = MegatronBertForSequenceClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__UpperCAmelCase : Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Any , __lowerCamelCase: List[Any] , __lowerCamelCase: int , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: int , __lowerCamelCase: Tuple , __lowerCamelCase: Optional[Any] ) -> Tuple:
__UpperCAmelCase : List[Any] = self.num_labels
__UpperCAmelCase : Optional[Any] = MegatronBertForTokenClassification(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__UpperCAmelCase : Union[str, Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self: Any , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Tuple , __lowerCamelCase: str , __lowerCamelCase: List[Any] ) -> Optional[Any]:
__UpperCAmelCase : List[Any] = self.num_choices
__UpperCAmelCase : Union[str, Any] = MegatronBertForMultipleChoice(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__UpperCAmelCase : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : int = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self: Union[str, Any] ) -> List[Any]:
__UpperCAmelCase : int = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = config_and_inputs
__UpperCAmelCase : List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _snake_case ( A__ , A__ , unittest.TestCase ):
lowerCamelCase__: List[str] = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__: Dict = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__: Any = True
# test_resize_embeddings = False
lowerCamelCase__: List[Any] = False
def _lowerCamelCase ( self: Any , __lowerCamelCase: List[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]=False ) -> Union[str, Any]:
__UpperCAmelCase : Dict = super()._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
if return_labels:
if model_class in get_values(lowerCamelCase__ ):
__UpperCAmelCase : Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCamelCase__ )
__UpperCAmelCase : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase__ )
return inputs_dict
def _lowerCamelCase ( self: Tuple ) -> str:
__UpperCAmelCase : Union[str, Any] = MegatronBertModelTester(self )
__UpperCAmelCase : Tuple = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def _lowerCamelCase ( self: List[str] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self: int ) -> Any:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*lowerCamelCase__ )
def _lowerCamelCase ( self: Optional[Any] ) -> Any:
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*lowerCamelCase__ )
def _lowerCamelCase ( self: Union[str, Any] ) -> str:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*lowerCamelCase__ )
def _lowerCamelCase ( self: List[Any] ) -> Union[str, Any]:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*lowerCamelCase__ )
def _lowerCamelCase ( self: Any ) -> Dict:
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*lowerCamelCase__ )
def _lowerCamelCase ( self: Dict ) -> int:
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*lowerCamelCase__ )
def _lowerCamelCase ( self: Any ) -> int:
__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*lowerCamelCase__ )
def _lowerCamelCase ( self: Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*lowerCamelCase__ )
def _UpperCamelCase ( snake_case__ ) -> List[str]:
return torch.tensor(
snake_case__, dtype=torch.long, device=torch_device, )
_snake_case = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
@slow
@unittest.skip("Model is not available." )
def _lowerCamelCase ( self: Union[str, Any] ) -> Dict:
__UpperCAmelCase : Dict = "nvidia/megatron-bert-uncased-345m"
if "MYDIR" in os.environ:
__UpperCAmelCase : List[Any] = os.path.join(os.environ["MYDIR"] , lowerCamelCase__ )
__UpperCAmelCase : List[str] = MegatronBertModel.from_pretrained(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.half()
__UpperCAmelCase : Dict = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
__UpperCAmelCase : Tuple = model(lowerCamelCase__ )[0]
__UpperCAmelCase : int = torch.Size((1, 9, 10_24) )
self.assertEqual(output.shape , lowerCamelCase__ )
__UpperCAmelCase : Tuple = [-0.60_40, -0.25_17, -0.10_25, 0.34_20, -0.67_58, -0.00_17, -0.10_89, -0.19_90, 0.57_28]
for ii in range(3 ):
for jj in range(3 ):
__UpperCAmelCase : Optional[int] = output[0, ii, jj]
__UpperCAmelCase : Union[str, Any] = expected[3 * ii + jj]
__UpperCAmelCase : List[str] = "ii={} jj={} a={} b={}".format(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
self.assertTrue(math.isclose(lowerCamelCase__ , lowerCamelCase__ , rel_tol=lowerCamelCase__ , abs_tol=lowerCamelCase__ ) , msg=lowerCamelCase__ )
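# Hedged alternative to the elementwise math.isclose loop above: torch.allclose
# checks the whole slice at once with a comparable (not identical) tolerance rule.
# The expected values are the ones hard-coded in the test; the observed tensor is
# a stand-in for output[0, :3, :3].
import torch

TOLERANCE = 1e-4
expected = torch.tensor([-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]).reshape(3, 3)
observed = expected.clone()
assert torch.allclose(observed, expected, rtol=TOLERANCE, atol=TOLERANCE)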
| 357 | import numpy as np
import datasets
_snake_case = '''
Compute the Mahalanobis Distance
Mahalanobis distance is the distance between a point and a distribution,
not between two distinct points; it is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since.
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
_snake_case = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
_snake_case = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalanobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def _lowerCamelCase ( self: List[str] ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: Union[str, Any] ) -> List[str]:
# convert to numpy arrays
__UpperCAmelCase : int = np.array(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = np.array(__lowerCamelCase )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("Expected `X` to be a 2D array" )
if len(reference_distribution.shape ) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D array" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D array with more than one element in the first dimension" )
# Get mahalanobis distance for each prediction
__UpperCAmelCase : str = X - np.mean(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = np.cov(reference_distribution.T )
try:
__UpperCAmelCase : int = np.linalg.inv(__lowerCamelCase )
except np.linalg.LinAlgError:
__UpperCAmelCase : Optional[int] = np.linalg.pinv(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = np.dot(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Optional[int] = np.dot(__lowerCamelCase , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 342 | 0 |
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_snake_case = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def _UpperCamelCase ( snake_case__ ) -> Any:
config.addinivalue_line(
"markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
config.addinivalue_line(
"markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested" )
config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment" )
config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate" )
config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule" )
def _UpperCamelCase ( snake_case__ ) -> List[Any]:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(A_ )
def _UpperCamelCase ( snake_case__ ) -> Tuple:
from transformers.testing_utils import pytest_terminal_summary_main
__UpperCAmelCase : Optional[int] = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(A_, id=A_ )
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Any:
# If no tests are collected, pytest exits with code 5, which makes the CI fail.
if exitstatus == 5:
__UpperCAmelCase : List[Any] = 0
# Doctest custom flag to ignore output.
_snake_case = doctest.register_optionflag('''IGNORE_RESULT''')
_snake_case = doctest.OutputChecker
class _snake_case ( a_ ):
def _lowerCamelCase ( self: Dict , __lowerCamelCase: List[Any] , __lowerCamelCase: Dict , __lowerCamelCase: str ) -> Tuple:
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , lowercase_ , lowercase_ , lowercase_ )
_snake_case = CustomOutputChecker
_snake_case = HfDoctestModule
_snake_case = HfDocTestParser
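# Minimal illustration of the custom doctest machinery set up above: once a flag
# is registered, an OutputChecker can honor it to accept any output for examples
# marked with it. register_optionflag is idempotent for a given name.
import doctest

IGNORE_RESULT_FLAG = doctest.register_optionflag("IGNORE_RESULT")

class IgnoreResultChecker(doctest.OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT_FLAG & optionflags:
            return True  # output is deliberately ignored for this example
        return super().check_output(want, got, optionflags)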
| 358 | import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class _snake_case ( unittest.TestCase ):
def __init__( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict=13 , __lowerCamelCase: List[str]=7 , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: int=True , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Tuple=99 , __lowerCamelCase: List[str]=32 , __lowerCamelCase: Optional[Any]=5 , __lowerCamelCase: List[str]=4 , __lowerCamelCase: str=37 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: int=0.1 , __lowerCamelCase: Optional[Any]=0.1 , __lowerCamelCase: Tuple=5_12 , __lowerCamelCase: int=16 , __lowerCamelCase: str=2 , __lowerCamelCase: Optional[Any]=0.02 , __lowerCamelCase: Optional[Any]=4 , ) -> str:
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : Optional[int] = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[str] = use_attention_mask
__UpperCAmelCase : Dict = use_token_type_ids
__UpperCAmelCase : Optional[int] = use_labels
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : Dict = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : Tuple = intermediate_size
__UpperCAmelCase : Union[str, Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : str = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = max_position_embeddings
__UpperCAmelCase : Optional[int] = type_vocab_size
__UpperCAmelCase : str = type_sequence_label_size
__UpperCAmelCase : Tuple = initializer_range
__UpperCAmelCase : str = num_choices
def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
__UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : str = None
if self.use_attention_mask:
__UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : Any = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__lowerCamelCase , )
return config, input_ids, attention_mask
def _lowerCamelCase ( self: str ) -> Any:
__UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = config_and_inputs
__UpperCAmelCase : Any = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: str = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCamelCase ( self: List[Any] ) -> Dict:
__UpperCAmelCase : Union[str, Any] = FlaxDistilBertModelTester(self )
@slow
def _lowerCamelCase ( self: Tuple ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
__UpperCAmelCase : Optional[int] = model_class_name.from_pretrained("distilbert-base-uncased" )
__UpperCAmelCase : Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(__lowerCamelCase )
@require_flax
class _snake_case ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self: int ) -> List[Any]:
__UpperCAmelCase : Dict = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased" )
__UpperCAmelCase : Any = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__UpperCAmelCase : Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__UpperCAmelCase : int = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
__UpperCAmelCase : str = (1, 11, 7_68)
self.assertEqual(output.shape , __lowerCamelCase )
__UpperCAmelCase : Optional[int] = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1e-4 ) )
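# The slow integration test above, condensed into a hedged standalone sketch.
# Requires network access plus the jax/flax extras; the checkpoint id is the same
# public one used in the test.
import numpy as np
from transformers import FlaxDistilBertModel

model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
attention_mask = np.ones_like(input_ids)
last_hidden = model(input_ids, attention_mask=attention_mask)[0]
print(last_hidden.shape)  # (1, 11, 768)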
| 342 | 0 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _snake_case :
def __init__( self: Optional[Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[int]=13 , __lowerCamelCase: int=30 , __lowerCamelCase: Union[str, Any]=2 , __lowerCamelCase: List[str]=3 , __lowerCamelCase: List[str]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: List[str]=32 , __lowerCamelCase: Optional[Any]=5 , __lowerCamelCase: Optional[Any]=4 , __lowerCamelCase: List[str]=37 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: List[str]=0.1 , __lowerCamelCase: Optional[Any]=0.1 , __lowerCamelCase: Dict=10 , __lowerCamelCase: Tuple=0.02 , __lowerCamelCase: Optional[int]=3 , __lowerCamelCase: Any=0.6 , __lowerCamelCase: Tuple=None , ) -> str:
__UpperCAmelCase : List[str] = parent
__UpperCAmelCase : List[Any] = batch_size
__UpperCAmelCase : str = image_size
__UpperCAmelCase : List[str] = patch_size
__UpperCAmelCase : Dict = num_channels
__UpperCAmelCase : Optional[int] = is_training
__UpperCAmelCase : int = use_labels
__UpperCAmelCase : Dict = hidden_size
__UpperCAmelCase : Union[str, Any] = num_hidden_layers
__UpperCAmelCase : int = num_attention_heads
__UpperCAmelCase : Tuple = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Optional[int] = hidden_dropout_prob
__UpperCAmelCase : int = attention_probs_dropout_prob
__UpperCAmelCase : int = type_sequence_label_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : Optional[int] = mask_ratio
__UpperCAmelCase : List[Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
__UpperCAmelCase : List[str] = (image_size // patch_size) ** 2
__UpperCAmelCase : Tuple = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def _lowerCamelCase ( self: Any ) -> Union[str, Any]:
__UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : List[Any] = None
if self.use_labels:
__UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self: Optional[int] ) -> Any:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__A , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def _lowerCamelCase ( self: int , __lowerCamelCase: Dict , __lowerCamelCase: Dict , __lowerCamelCase: List[str] ) -> str:
__UpperCAmelCase : Any = ViTMAEModel(config=__A )
model.to(__A )
model.eval()
__UpperCAmelCase : Union[str, Any] = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Tuple , __lowerCamelCase: List[Any] , __lowerCamelCase: Tuple ) -> int:
__UpperCAmelCase : int = ViTMAEForPreTraining(__A )
model.to(__A )
model.eval()
__UpperCAmelCase : Dict = model(__A )
__UpperCAmelCase : List[str] = (self.image_size // self.patch_size) ** 2
__UpperCAmelCase : List[str] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
__UpperCAmelCase : Optional[int] = 1
__UpperCAmelCase : Any = ViTMAEForPreTraining(__A )
model.to(__A )
model.eval()
__UpperCAmelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCAmelCase : Union[str, Any] = model(__A )
__UpperCAmelCase : Optional[int] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def _lowerCamelCase ( self: Tuple ) -> List[str]:
__UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = config_and_inputs
__UpperCAmelCase : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase__: Tuple = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
lowerCamelCase__: Optional[Any] = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
lowerCamelCase__: List[Any] = False
lowerCamelCase__: Any = False
lowerCamelCase__: Dict = False
lowerCamelCase__: Optional[int] = False
def _lowerCamelCase ( self: List[Any] ) -> Union[str, Any]:
__UpperCAmelCase : Any = ViTMAEModelTester(self )
__UpperCAmelCase : Tuple = ConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=37 )
def _lowerCamelCase ( self: Dict ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def _lowerCamelCase ( self: Optional[int] ) -> Optional[Any]:
pass
def _lowerCamelCase ( self: Any ) -> int:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Any = model_class(__A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A , nn.Linear ) )
def _lowerCamelCase ( self: List[Any] ) -> Optional[Any]:
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : List[str] = model_class(__A )
__UpperCAmelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Optional[int] = [*signature.parameters.keys()]
__UpperCAmelCase : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __A )
def _lowerCamelCase ( self: str ) -> List[Any]:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def _lowerCamelCase ( self: Union[str, Any] ) -> Any:
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__A )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Dict , __lowerCamelCase: int ) -> List[Any]:
# make masks reproducible
np.random.seed(2 )
__UpperCAmelCase : Union[str, Any] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
__UpperCAmelCase : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__UpperCAmelCase : Tuple = torch.from_numpy(__A )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
__UpperCAmelCase : Optional[int] = pt_noise
super().check_pt_tf_models(__A , __A , __A )
def _lowerCamelCase ( self: Union[str, Any] ) -> List[str]:
__UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[Any] = model_class(__A )
model.to(__A )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
__UpperCAmelCase : Optional[int] = model(**self._prepare_for_class(__A , __A ) )
__UpperCAmelCase : Optional[int] = outputs[0].cpu().numpy()
__UpperCAmelCase : int = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A )
__UpperCAmelCase : Dict = model_class.from_pretrained(__A )
model.to(__A )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
__UpperCAmelCase : List[Any] = model(**self._prepare_for_class(__A , __A ) )
# Make sure we don't have nans
__UpperCAmelCase : int = after_outputs[0].cpu().numpy()
__UpperCAmelCase : Union[str, Any] = 0
__UpperCAmelCase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )  # intended: max |out_before_save - out_after_load|; both buffers ended up named out_a
self.assertLessEqual(__A , 1e-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowerCamelCase ( self: Any ) -> Optional[Any]:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowerCamelCase ( self: Optional[Any] ) -> List[Any]:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowerCamelCase ( self: Dict ) -> Optional[Any]:
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def _lowerCamelCase ( self: Optional[Any] ) -> List[Any]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowerCamelCase ( self: Optional[Any] ) -> int:
pass
@slow
def _lowerCamelCase ( self: Tuple ) -> List[Any]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : int = ViTMAEModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def _UpperCamelCase ( ) -> List[Any]:
__UpperCAmelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self: Tuple ) -> Optional[Any]:
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self: List[Any] ) -> Union[str, Any]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
__UpperCAmelCase : Any = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__A )
__UpperCAmelCase : List[Any] = self.default_image_processor
__UpperCAmelCase : str = prepare_img()
__UpperCAmelCase : List[str] = image_processor(images=__A , return_tensors="pt" ).to(__A )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
__UpperCAmelCase : Optional[int] = ViTMAEConfig()
__UpperCAmelCase : Any = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
__UpperCAmelCase : List[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
__UpperCAmelCase : Tuple = model(**__A , noise=torch.from_numpy(__A ).to(device=__A ) )
# verify the logits
__UpperCAmelCase : Optional[int] = torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , __A )
__UpperCAmelCase : Dict = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__A ) , atol=1e-4 ) )
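# Hedged sketch of the reproducibility trick these tests rely on: ViTMAE masks
# patches at random, so the tests seed numpy/torch and pass an explicit `noise`
# tensor, making two forward passes comparable. Sizes below match the base config.
import numpy as np
import torch

torch.manual_seed(2)
np.random.seed(2)
num_patches = (224 // 16) ** 2                       # 196 patches
noise = torch.from_numpy(np.random.uniform(size=(1, num_patches)))
# outputs = model(**inputs, noise=noise.to(device))  # same noise -> same mask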
| 359 | import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
_snake_case = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
_snake_case = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
_snake_case = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
_snake_case = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
_snake_case = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Any:
for tf_name, hf_name in patterns:
__UpperCAmelCase : Optional[int] = k.replace(snake_case__, snake_case__ )
return k
def _UpperCamelCase ( snake_case__, snake_case__ ) -> BigBirdPegasusForConditionalGeneration:
__UpperCAmelCase : Dict = BigBirdPegasusConfig(**snake_case__ )
__UpperCAmelCase : Dict = BigBirdPegasusForConditionalGeneration(snake_case__ )
__UpperCAmelCase : Optional[Any] = torch_model.state_dict()
__UpperCAmelCase : Optional[int] = {}
# separating decoder weights
__UpperCAmelCase : List[Any] = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder" )}
__UpperCAmelCase : str = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder" )}
for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion" ):
__UpperCAmelCase : Optional[int] = [k.endswith(snake_case__ ) for ending in KEYS_TO_IGNORE]
if any(snake_case__ ):
continue
__UpperCAmelCase : List[str] = DECODER_PATTERNS
__UpperCAmelCase : str = rename_state_dict_key(snake_case__, snake_case__ )
if new_k not in state_dict:
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
if any(True if i in k else False for i in ["dense", "query", "key", "value"] ):
__UpperCAmelCase : Optional[int] = v.T
__UpperCAmelCase : str = torch.from_numpy(snake_case__ )
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion" ):
__UpperCAmelCase : int = [k.endswith(snake_case__ ) for ending in KEYS_TO_IGNORE]
if any(snake_case__ ):
continue
__UpperCAmelCase : Optional[Any] = REMAINING_PATTERNS
__UpperCAmelCase : Optional[int] = rename_state_dict_key(snake_case__, snake_case__ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
if any(True if i in k else False for i in ["dense", "query", "key", "value"] ):
__UpperCAmelCase : List[Any] = v.T
__UpperCAmelCase : List[str] = torch.from_numpy(snake_case__ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
__UpperCAmelCase : List[Any] = mapping["model.embed_positions.weight"]
__UpperCAmelCase : Optional[Any] = mapping.pop("model.embed_positions.weight" )
__UpperCAmelCase , __UpperCAmelCase : Any = torch_model.load_state_dict(snake_case__, strict=snake_case__ )
__UpperCAmelCase : str = [
k
for k in missing
if k
not in [
"final_logits_bias",
"model.encoder.embed_tokens.weight",
"model.decoder.embed_tokens.weight",
"lm_head.weight",
]
]
assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], f'''no matches found for the following tf keys {extra}'''
return torch_model
def _UpperCamelCase ( snake_case__ ) -> Dict:
__UpperCAmelCase : Tuple = tf.train.list_variables(snake_case__ )
__UpperCAmelCase : List[str] = {}
__UpperCAmelCase : str = ["global_step"]
for name, shape in tqdm(snake_case__, desc="converting tf checkpoint to dict" ):
__UpperCAmelCase : Tuple = any(pat in name for pat in ignore_name )
if skip_key:
continue
__UpperCAmelCase : Optional[Any] = tf.train.load_variable(snake_case__, snake_case__ )
__UpperCAmelCase : Tuple = array
return tf_weights
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Dict:
__UpperCAmelCase : str = get_tf_weights_as_numpy(snake_case__ )
__UpperCAmelCase : List[Any] = convert_bigbird_pegasus(snake_case__, snake_case__ )
torch_model.save_pretrained(snake_case__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
_snake_case = parser.parse_args()
_snake_case = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
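# The conversion above transposes every dense/query/key/value kernel because TF
# stores Linear kernels as (in_features, out_features) while torch.nn.Linear
# expects (out_features, in_features). A standalone sketch of that step, with
# illustrative sizes:
import numpy as np
import torch

tf_kernel = np.random.rand(1024, 4096).astype(np.float32)  # TF layout: (in, out)
pt_weight = torch.from_numpy(tf_kernel.T)                   # PT layout: (out, in)
assert pt_weight.shape == (4096, 1024)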
| 342 | 0 |
from __future__ import annotations
_snake_case = list[tuple[int, int]]
_snake_case = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_snake_case = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class _snake_case :
def __init__( self: List[Any] , __lowerCamelCase: int , __lowerCamelCase: Dict , __lowerCamelCase: Any , __lowerCamelCase: str , __lowerCamelCase: Any , __lowerCamelCase: Union[str, Any] , ) -> int:
__UpperCAmelCase : Optional[Any] = pos_x
__UpperCAmelCase : Tuple = pos_y
__UpperCAmelCase : Optional[int] = (pos_y, pos_x)
__UpperCAmelCase : Tuple = goal_x
__UpperCAmelCase : Tuple = goal_y
__UpperCAmelCase : Tuple = g_cost
__UpperCAmelCase : Any = parent
__UpperCAmelCase : int = self.calculate_heuristic()
def _lowerCamelCase ( self: Tuple ) -> float:
__UpperCAmelCase : Any = abs(self.pos_x - self.goal_x )
__UpperCAmelCase : Optional[Any] = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self: Tuple , __lowerCamelCase: Optional[Any] ) -> bool:
return self.f_cost < other.f_cost
class _snake_case :
def __init__( self: int , __lowerCamelCase: Tuple , __lowerCamelCase: int ) -> Optional[int]:
__UpperCAmelCase : List[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _SCREAMING_SNAKE_CASE )
__UpperCAmelCase : Dict = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , _SCREAMING_SNAKE_CASE )
__UpperCAmelCase : int = [self.start]
__UpperCAmelCase : list[Node] = []
__UpperCAmelCase : Tuple = False
def _lowerCamelCase ( self: List[str] ) -> Path | None:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__UpperCAmelCase : Optional[Any] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
__UpperCAmelCase : Any = True
return self.retrace_path(_SCREAMING_SNAKE_CASE )
self.closed_nodes.append(_SCREAMING_SNAKE_CASE )
__UpperCAmelCase : List[Any] = self.get_successors(_SCREAMING_SNAKE_CASE )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
else:
# retrieve the best current path
__UpperCAmelCase : Tuple = self.open_nodes.pop(self.open_nodes.index(_SCREAMING_SNAKE_CASE ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
else:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
if not self.reached:
return [self.start.pos]
return None
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int ) -> list[Node]:
__UpperCAmelCase : int = []
for action in delta:
__UpperCAmelCase : Optional[int] = parent.pos_x + action[1]
__UpperCAmelCase : Union[str, Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_SCREAMING_SNAKE_CASE ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _SCREAMING_SNAKE_CASE , ) )
return successors
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: List[str] ) -> Path:
__UpperCAmelCase : Dict = node
__UpperCAmelCase : str = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__UpperCAmelCase : str = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
_snake_case = (0, 0)
_snake_case = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('''------''')
_snake_case = GreedyBestFirst(init, goal)
_snake_case = greedy_bf.search()
if path:
for pos_x, pos_y in path:
_snake_case = 2
for elem in grid:
print(elem)
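# A de-obfuscated sketch of the same greedy best-first idea as the class above,
# kept minimal for readability (all names here are illustrative, not the
# originals). Positions are (row, col) on a 0/1 grid, and nodes are expanded
# purely by the Manhattan heuristic:
import heapq

def greedy_best_first(grid, start, goal):
    def h(pos):  # Manhattan distance to the goal
        return abs(pos[0] - goal[0]) + abs(pos[1] - goal[1])

    frontier = [(h(start), start)]
    came_from = {start: None}
    while frontier:
        _, current = heapq.heappop(frontier)
        if current == goal:
            path = []
            while current is not None:  # walk parents back to the start
                path.append(current)
                current = came_from[current]
            return path[::-1]
        for dy, dx in ((-1, 0), (0, -1), (1, 0), (0, 1)):
            ny, nx = current[0] + dy, current[1] + dx
            if (0 <= ny < len(grid) and 0 <= nx < len(grid[0])
                    and grid[ny][nx] == 0 and (ny, nx) not in came_from):
                came_from[(ny, nx)] = current
                heapq.heappush(frontier, (h((ny, nx)), (ny, nx)))
    return None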
| 360 | import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class _snake_case ( _lowercase ):
lowerCamelCase__: Any = ["image_processor", "tokenizer"]
lowerCamelCase__: Optional[Any] = "BlipImageProcessor"
lowerCamelCase__: Optional[int] = "AutoTokenizer"
def __init__( self: List[str] , __lowerCamelCase: str , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[Any] ) -> Dict:
super().__init__(__lowerCamelCase , __lowerCamelCase )
# add QFormer tokenizer
__UpperCAmelCase : Dict = qformer_tokenizer
def __call__( self: Any , __lowerCamelCase: ImageInput = None , __lowerCamelCase: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCamelCase: bool = True , __lowerCamelCase: Union[bool, str, PaddingStrategy] = False , __lowerCamelCase: Union[bool, str, TruncationStrategy] = None , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: int = 0 , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = True , __lowerCamelCase: Optional[Union[str, TensorType]] = None , **__lowerCamelCase: Dict , ) -> BatchFeature:
if images is None and text is None:
raise ValueError("You have to specify at least images or text." )
__UpperCAmelCase : str = BatchFeature()
if text is not None:
__UpperCAmelCase : Any = self.tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
encoding.update(__lowerCamelCase )
__UpperCAmelCase : Dict = self.qformer_tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
__UpperCAmelCase : int = qformer_text_encoding.pop("input_ids" )
__UpperCAmelCase : Optional[int] = qformer_text_encoding.pop("attention_mask" )
if images is not None:
__UpperCAmelCase : Union[str, Any] = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase )
encoding.update(__lowerCamelCase )
return encoding
def _lowerCamelCase ( self: Any , *__lowerCamelCase: Any , **__lowerCamelCase: Any ) -> Optional[Any]:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( self: Tuple , *__lowerCamelCase: Any , **__lowerCamelCase: Dict ) -> Tuple:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _lowerCamelCase ( self: List[str] ) -> Tuple:
__UpperCAmelCase : str = self.tokenizer.model_input_names
__UpperCAmelCase : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Union[str, Any] , **__lowerCamelCase: Optional[Any] ) -> str:
if os.path.isfile(__lowerCamelCase ):
raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
__UpperCAmelCase : List[str] = os.path.join(__lowerCamelCase , "qformer_tokenizer" )
self.qformer_tokenizer.save_pretrained(__lowerCamelCase )
return super().save_pretrained(__lowerCamelCase , **__lowerCamelCase )
@classmethod
def _lowerCamelCase ( cls: Tuple , __lowerCamelCase: Tuple , **__lowerCamelCase: Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(__lowerCamelCase , subfolder="qformer_tokenizer" )
__UpperCAmelCase : List[Any] = cls._get_arguments_from_pretrained(__lowerCamelCase , **__lowerCamelCase )
args.append(__lowerCamelCase )
return cls(*__lowerCamelCase )
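# Hedged usage sketch (the class name is obfuscated above; upstream it is
# InstructBlipProcessor, which bundles an image processor, a language-model
# tokenizer, and a Q-Former tokenizer; the checkpoint name is illustrative):
# processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
# inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")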
| 342 | 0 |
from __future__ import annotations
from collections import namedtuple
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> List[str]:
__UpperCAmelCase : Optional[int] = namedtuple("result", "name value" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage", power / current )
elif current == 0:
return result("current", power / voltage )
elif power == 0:
return result("power", float(round(abs(voltage * current ), 2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 361 | import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
_snake_case = {
    '''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
    '''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
    '''tokenizer_config_file''': {
        '''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
_snake_case = {'''facebook/blenderbot-3B''': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _UpperCamelCase ( ) -> Dict:
__UpperCAmelCase : Tuple = (
list(range(ord("!" ), ord("~" ) + 1 ) ) + list(range(ord("¡" ), ord("¬" ) + 1 ) ) + list(range(ord("®" ), ord("ÿ" ) + 1 ) )
)
__UpperCAmelCase : str = bs[:]
__UpperCAmelCase : Any = 0
for b in range(2**8 ):
if b not in bs:
bs.append(snake_case__ )
cs.append(2**8 + n )
n += 1
__UpperCAmelCase : Optional[Any] = [chr(snake_case__ ) for n in cs]
return dict(zip(snake_case__, snake_case__ ) )
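# Quick illustration of the mapping built above (the function name is
# obfuscated here; upstream it is bytes_to_unicode): printable latin-range
# bytes map to themselves, and the remaining bytes are shifted into code
# points >= 256 so every byte gets a visible, non-control character, e.g.
# mapping[ord("A")] == "A" while mapping[0] == chr(256) == "Ā".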
def _UpperCamelCase ( snake_case__ ) -> Any:
__UpperCAmelCase : List[Any] = set()
__UpperCAmelCase : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__UpperCAmelCase : Union[str, Any] = char
return pairs
class _snake_case ( _lowercase ):
lowerCamelCase__: str = VOCAB_FILES_NAMES
lowerCamelCase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__: Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__: Dict = ["input_ids", "attention_mask"]
def __init__( self: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]="replace" , __lowerCamelCase: List[str]="<s>" , __lowerCamelCase: List[str]="</s>" , __lowerCamelCase: str="</s>" , __lowerCamelCase: Tuple="<s>" , __lowerCamelCase: Optional[int]="<unk>" , __lowerCamelCase: Any="<pad>" , __lowerCamelCase: List[str]="<mask>" , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int , ) -> List[str]:
__UpperCAmelCase : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
__UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
__UpperCAmelCase : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
__UpperCAmelCase : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
__UpperCAmelCase : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
__UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase : Dict = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , )
with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
__UpperCAmelCase : List[Any] = json.load(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = {v: k for k, v in self.encoder.items()}
__UpperCAmelCase : Dict = errors # how to handle errors in decoding
__UpperCAmelCase : Optional[int] = bytes_to_unicode()
__UpperCAmelCase : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle:
__UpperCAmelCase : List[Any] = merges_handle.read().split("\n" )[1:-1]
__UpperCAmelCase : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
__UpperCAmelCase : int = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
__UpperCAmelCase : List[Any] = {}
__UpperCAmelCase : Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__UpperCAmelCase : int = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def _lowerCamelCase ( self: Dict ) -> Any:
return len(self.encoder )
def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
return dict(self.encoder , **self.added_tokens_encoder )
def _lowerCamelCase ( self: int , __lowerCamelCase: List[Any] ) -> Union[str, Any]:
if token in self.cache:
return self.cache[token]
__UpperCAmelCase : List[Any] = tuple(__lowerCamelCase )
__UpperCAmelCase : Dict = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
__UpperCAmelCase : Optional[int] = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = bigram
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : str = 0
while i < len(__lowerCamelCase ):
try:
__UpperCAmelCase : Union[str, Any] = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCAmelCase : Union[str, Any] = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCAmelCase : List[Any] = tuple(__lowerCamelCase )
__UpperCAmelCase : str = new_word
if len(__lowerCamelCase ) == 1:
break
else:
__UpperCAmelCase : Optional[Any] = get_pairs(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = " ".join(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = word
return word
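    # Worked micro-example for the merge loop above: with bpe_ranks
    # {("l", "o"): 0, ("lo", "w"): 1}, bpe("low") first merges ("l", "o") into
    # "lo", then ("lo", "w") into "low", so the token comes back as one unit.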
def _lowerCamelCase ( self: Dict , __lowerCamelCase: Optional[Any] ) -> Dict:
__UpperCAmelCase : Any = []
for token in re.findall(self.pat , __lowerCamelCase ):
__UpperCAmelCase : int = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) )
return bpe_tokens
def _lowerCamelCase ( self: int , __lowerCamelCase: str ) -> Dict:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[Any] ) -> List[str]:
return self.decoder.get(__lowerCamelCase )
def _lowerCamelCase ( self: Any , __lowerCamelCase: Any ) -> int:
__UpperCAmelCase : Dict = "".join(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCAmelCase : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__UpperCAmelCase : Dict = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" )
__UpperCAmelCase : Optional[Any] = 0
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
__UpperCAmelCase : Optional[Any] = token_index
writer.write(" ".join(__lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def _lowerCamelCase ( self: Dict , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None , __lowerCamelCase: bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase : int = [self.sep_token_id]
__UpperCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCamelCase ( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int ) -> List[Any]:
__UpperCAmelCase : Optional[Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()):
__UpperCAmelCase : Optional[Any] = " " + text
return (text, kwargs)
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[str]:
return token_ids_a + [self.eos_token_id]
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: "Conversation" ) -> List[int]:
__UpperCAmelCase : Tuple = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to prefix with a space, as is done within Blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = " ".join(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.encode(__lowerCamelCase )
if len(__lowerCamelCase ) > self.model_max_length:
__UpperCAmelCase : List[Any] = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 342 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
    '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
    '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class _snake_case ( lowerCamelCase__ ):
lowerCamelCase__: str = """rwkv"""
lowerCamelCase__: str = {"""max_position_embeddings""": """context_length"""}
def __init__( self: Union[str, Any] , __lowerCamelCase: Tuple=5_02_77 , __lowerCamelCase: int=10_24 , __lowerCamelCase: Dict=40_96 , __lowerCamelCase: Union[str, Any]=32 , __lowerCamelCase: Optional[int]=None , __lowerCamelCase: List[Any]=None , __lowerCamelCase: Dict=1e-5 , __lowerCamelCase: Optional[Any]=0 , __lowerCamelCase: Union[str, Any]=0 , __lowerCamelCase: List[str]=6 , __lowerCamelCase: List[Any]=False , __lowerCamelCase: Dict=True , **__lowerCamelCase: Optional[int] , ) -> List[str]:
__UpperCAmelCase : int = vocab_size
__UpperCAmelCase : List[str] = context_length
__UpperCAmelCase : Any = hidden_size
__UpperCAmelCase : Tuple = num_hidden_layers
__UpperCAmelCase : str = attention_hidden_size if attention_hidden_size is not None else hidden_size
__UpperCAmelCase : List[Any] = intermediate_size if intermediate_size is not None else 4 * hidden_size
__UpperCAmelCase : Optional[int] = layer_norm_epsilon
__UpperCAmelCase : int = rescale_every
__UpperCAmelCase : Any = use_cache
__UpperCAmelCase : List[str] = bos_token_id
__UpperCAmelCase : Any = eos_token_id
super().__init__(
tie_word_embeddings=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
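# Hedged note: because of the attribute_map defined above, max_position_embeddings
# is an alias for context_length on instances of this config (upstream the class
# is RwkvConfig), e.g. RwkvConfig(context_length=2048).max_position_embeddings == 2048.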
| 362 | import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: List[Any] = CanineTokenizer
lowerCamelCase__: Optional[int] = False
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
super().setUp()
__UpperCAmelCase : Tuple = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowerCamelCase ( self: Union[str, Any] ) -> List[Any]:
return CanineTokenizer.from_pretrained("google/canine-s" )
def _lowerCamelCase ( self: Any , **__lowerCamelCase: List[Any] ) -> CanineTokenizer:
__UpperCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
__UpperCAmelCase : Optional[int] = 10_24
return tokenizer
@require_torch
def _lowerCamelCase ( self: List[str] ) -> int:
__UpperCAmelCase : Union[str, Any] = self.canine_tokenizer
__UpperCAmelCase : List[str] = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
__UpperCAmelCase : Dict = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
# fmt: on
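        # The expected ids above are raw Unicode code points plus CANINE's
        # private-use specials: 57_344 == 0xE000 ([CLS]) and 57_345 == 0xE001
        # ([SEP]); e.g. [ord(c) for c in "Life"] == [76, 105, 102, 101].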
__UpperCAmelCase : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Optional[Any] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
__UpperCAmelCase : Optional[Any] = self.canine_tokenizer
__UpperCAmelCase : Dict = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
__UpperCAmelCase : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , __lowerCamelCase )
self.assertIn("attention_mask" , __lowerCamelCase )
self.assertIn("token_type_ids" , __lowerCamelCase )
@require_torch
def _lowerCamelCase ( self: Any ) -> List[str]:
__UpperCAmelCase : Optional[Any] = self.canine_tokenizer
__UpperCAmelCase : int = [
"What's the weater?",
"It's about 25 degrees.",
]
__UpperCAmelCase : List[Any] = tokenizer(
text_target=__lowerCamelCase , max_length=32 , padding="max_length" , truncation=__lowerCamelCase , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
# safety check on max_len default value so we are sure the test works
__UpperCAmelCase : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__UpperCAmelCase : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCAmelCase : int = tempfile.mkdtemp()
__UpperCAmelCase : List[Any] = " He is very happy, UNwant\u00E9d,running"
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
__UpperCAmelCase : Tuple = tokenizer.__class__.from_pretrained(__lowerCamelCase )
__UpperCAmelCase : Dict = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
shutil.rmtree(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCAmelCase : List[Any] = tempfile.mkdtemp()
__UpperCAmelCase : Optional[int] = " He is very happy, UNwant\u00E9d,running"
__UpperCAmelCase : str = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
__UpperCAmelCase : Tuple = chr(0xE_0_0_7 )
additional_special_tokens.append(__lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
__UpperCAmelCase : Optional[int] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
__UpperCAmelCase : str = tokenizer.__class__.from_pretrained(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertIn(__lowerCamelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__UpperCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(__lowerCamelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__lowerCamelCase )
def _lowerCamelCase ( self: str ) -> Optional[int]:
__UpperCAmelCase : List[Any] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.get_clean_sequence(__lowerCamelCase )
# a special token for Canine can be defined as follows:
__UpperCAmelCase : int = 0xE_0_0_5
__UpperCAmelCase : Tuple = chr(__lowerCamelCase )
tokenizer.add_special_tokens({"cls_token": special_token} )
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , 1 )
__UpperCAmelCase : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : Dict = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : int = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , input_encoded + special_token_id )
__UpperCAmelCase : Optional[int] = tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
self.assertTrue(special_token not in decoded )
def _lowerCamelCase ( self: Optional[int] ) -> Optional[Any]:
__UpperCAmelCase : List[str] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : Optional[int] = chr(0xE_0_0_5 )
__UpperCAmelCase : List[str] = chr(0xE_0_0_6 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__lowerCamelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
__UpperCAmelCase : Tuple = tokenizer.tokenize(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , 1 )
self.assertEqual(len(__lowerCamelCase ) , 1 )
self.assertEqual(token_a[0] , __lowerCamelCase )
self.assertEqual(token_a[0] , __lowerCamelCase )
@require_tokenizers
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
__UpperCAmelCase : Any = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
__UpperCAmelCase : Union[str, Any] = 0xE_0_0_6
__UpperCAmelCase : int = chr(__lowerCamelCase )
__UpperCAmelCase : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__lowerCamelCase )
tokenizer.from_pretrained(__lowerCamelCase )
def _lowerCamelCase ( self: Dict ) -> List[str]:
__UpperCAmelCase : str = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
__UpperCAmelCase : Tuple = json.load(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
__UpperCAmelCase : Optional[int] = json.load(__lowerCamelCase )
# a special token for Canine can be defined as follows:
__UpperCAmelCase : Any = 0xE_0_0_6
__UpperCAmelCase : Union[str, Any] = chr(__lowerCamelCase )
__UpperCAmelCase : Dict = [new_token_a]
__UpperCAmelCase : int = [new_token_a]
with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__UpperCAmelCase : List[str] = tokenizer_class.from_pretrained(__lowerCamelCase , extra_ids=0 )
self.assertIn(__lowerCamelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
__UpperCAmelCase : List[Any] = 0xE_0_0_7
__UpperCAmelCase : List[Any] = chr(__lowerCamelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__UpperCAmelCase : str = [AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase )]
__UpperCAmelCase : Dict = tokenizer_class.from_pretrained(
__lowerCamelCase , additional_special_tokens=__lowerCamelCase , extra_ids=0 )
self.assertIn(__lowerCamelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : Optional[int] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : int = "hello world"
if self.space_between_special_tokens:
__UpperCAmelCase : Any = "[CLS] hello world [SEP]"
else:
__UpperCAmelCase : Union[str, Any] = input
__UpperCAmelCase : List[Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : Any = tokenizer.decode(__lowerCamelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__lowerCamelCase , [output, output.lower()] )
def _lowerCamelCase ( self: Dict ) -> Any:
__UpperCAmelCase : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : List[str] = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
__UpperCAmelCase : List[str] = "a"
__UpperCAmelCase : Any = ord(__lowerCamelCase )
for attr in attributes_list:
setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase )
setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase )
setattr(__lowerCamelCase , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [] )
__UpperCAmelCase : Tuple = 0xE_0_0_6
__UpperCAmelCase : Optional[Any] = chr(__lowerCamelCase )
setattr(__lowerCamelCase , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
pass
def _lowerCamelCase ( self: Any ) -> Any:
pass
def _lowerCamelCase ( self: Union[str, Any] ) -> Tuple:
pass
def _lowerCamelCase ( self: Optional[int] ) -> Any:
pass
def _lowerCamelCase ( self: List[str] ) -> str:
pass
def _lowerCamelCase ( self: Union[str, Any] ) -> Optional[int]:
pass
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
pass
def _lowerCamelCase ( self: str ) -> Tuple:
pass
| 342 | 0 |
"""simple docstring"""
_snake_case = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 363 | import logging
import os
from .state import PartialState
class _snake_case ( logging.LoggerAdapter ):
@staticmethod
def _lowerCamelCase ( __lowerCamelCase: Any ) -> int:
__UpperCAmelCase : str = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[Any] , *__lowerCamelCase: List[str] , **__lowerCamelCase: List[Any] ) -> Optional[int]:
if PartialState._shared_state == {}:
raise RuntimeError(
"You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." )
__UpperCAmelCase : Any = kwargs.pop("main_process_only" , __lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = kwargs.pop("in_order" , __lowerCamelCase )
if self.isEnabledFor(__lowerCamelCase ):
if self._should_log(__lowerCamelCase ):
__UpperCAmelCase , __UpperCAmelCase : int = self.process(__lowerCamelCase , __lowerCamelCase )
self.logger.log(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
elif in_order:
__UpperCAmelCase : Optional[int] = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self.process(__lowerCamelCase , __lowerCamelCase )
self.logger.log(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
state.wait_for_everyone()
def _UpperCamelCase ( snake_case__, snake_case__ = None ) -> List[str]:
if log_level is None:
__UpperCAmelCase : List[Any] = os.environ.get("ACCELERATE_LOG_LEVEL", snake_case__ )
__UpperCAmelCase : Union[str, Any] = logging.getLogger(snake_case__ )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(snake_case__, {} )
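# Illustrative usage of the helper above (the def name is obfuscated here;
# upstream it is accelerate.logging.get_logger):
# logger = get_logger(__name__, log_level="INFO")
# logger.info("Printed once", main_process_only=True)
# logger.debug("Printed by every rank, in order", main_process_only=False, in_order=True)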
| 342 | 0 |
def _UpperCamelCase ( snake_case__, snake_case__ = False ) -> str:
if not isinstance(snake_case__, snake_case__ ):
__UpperCAmelCase : str = f'''Expected string as input, found {type(snake_case__ )}'''
raise ValueError(snake_case__ )
if not isinstance(snake_case__, snake_case__ ):
__UpperCAmelCase : List[str] = f'''Expected boolean as use_pascal parameter, found {type(snake_case__ )}'''
raise ValueError(snake_case__ )
__UpperCAmelCase : Union[str, Any] = input_str.split("_" )
__UpperCAmelCase : Dict = 0 if use_pascal else 1
__UpperCAmelCase : List[str] = words[start_index:]
__UpperCAmelCase : int = [word[0].upper() + word[1:] for word in words_to_capitalize]
__UpperCAmelCase : int = "" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 364 | from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _snake_case ( _lowercase ):
def __init__( self: Optional[Any] , __lowerCamelCase: NestedDataStructureLike[PathLike] , __lowerCamelCase: Optional[NamedSplit] = None , __lowerCamelCase: Optional[Features] = None , __lowerCamelCase: str = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: Optional[int] = None , **__lowerCamelCase: Tuple , ) -> str:
super().__init__(
__lowerCamelCase , split=__lowerCamelCase , features=__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase , streaming=__lowerCamelCase , num_proc=__lowerCamelCase , **__lowerCamelCase , )
__UpperCAmelCase : Union[str, Any] = path_or_paths if isinstance(__lowerCamelCase , __lowerCamelCase ) else {self.split: path_or_paths}
__UpperCAmelCase : int = Text(
cache_dir=__lowerCamelCase , data_files=__lowerCamelCase , features=__lowerCamelCase , **__lowerCamelCase , )
def _lowerCamelCase ( self: List[Any] ) -> Optional[Any]:
# Build iterable dataset
if self.streaming:
__UpperCAmelCase : List[str] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__UpperCAmelCase : Any = None
__UpperCAmelCase : Any = None
__UpperCAmelCase : Dict = None
__UpperCAmelCase : str = None
self.builder.download_and_prepare(
download_config=__lowerCamelCase , download_mode=__lowerCamelCase , verification_mode=__lowerCamelCase , base_path=__lowerCamelCase , num_proc=self.num_proc , )
__UpperCAmelCase : Dict = self.builder.as_dataset(
split=self.split , verification_mode=__lowerCamelCase , in_memory=self.keep_in_memory )
return dataset
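# Hedged usage note: this reader (upstream datasets.io.text.TextDatasetReader)
# is normally reached through the public API rather than instantiated directly:
# from datasets import load_dataset
# ds = load_dataset("text", data_files={"train": "corpus.txt"}, split="train")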
| 342 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
    '''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _snake_case ( __lowerCAmelCase ):
lowerCamelCase__: List[str] = '''sew-d'''
def __init__( self: List[Any] , __lowerCamelCase: List[str]=32 , __lowerCamelCase: List[str]=7_68 , __lowerCamelCase: int=12 , __lowerCamelCase: List[str]=12 , __lowerCamelCase: List[Any]=30_72 , __lowerCamelCase: Any=2 , __lowerCamelCase: int=5_12 , __lowerCamelCase: int=2_56 , __lowerCamelCase: List[str]=True , __lowerCamelCase: Dict=True , __lowerCamelCase: Optional[int]=("p2c", "c2p") , __lowerCamelCase: Optional[Any]="layer_norm" , __lowerCamelCase: Dict="gelu_python" , __lowerCamelCase: int=0.1 , __lowerCamelCase: Union[str, Any]=0.1 , __lowerCamelCase: Optional[Any]=0.1 , __lowerCamelCase: Optional[int]=0.0 , __lowerCamelCase: str=0.1 , __lowerCamelCase: List[str]=0.02 , __lowerCamelCase: str=1e-7 , __lowerCamelCase: List[Any]=1e-5 , __lowerCamelCase: Any="group" , __lowerCamelCase: str="gelu" , __lowerCamelCase: int=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , __lowerCamelCase: List[str]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __lowerCamelCase: Optional[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __lowerCamelCase: Union[str, Any]=False , __lowerCamelCase: Any=1_28 , __lowerCamelCase: Tuple=16 , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Optional[int]=0.05 , __lowerCamelCase: Optional[int]=10 , __lowerCamelCase: Tuple=2 , __lowerCamelCase: Optional[int]=0.0 , __lowerCamelCase: Optional[Any]=10 , __lowerCamelCase: str=0 , __lowerCamelCase: Optional[int]="mean" , __lowerCamelCase: str=False , __lowerCamelCase: Optional[int]=False , __lowerCamelCase: List[str]=2_56 , __lowerCamelCase: Union[str, Any]=0 , __lowerCamelCase: Tuple=1 , __lowerCamelCase: Any=2 , **__lowerCamelCase: Optional[Any] , ) -> List[Any]:
super().__init__(**lowerCAmelCase_ , pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ )
__UpperCAmelCase : List[str] = hidden_size
__UpperCAmelCase : Optional[int] = feat_extract_norm
__UpperCAmelCase : Tuple = feat_extract_activation
__UpperCAmelCase : str = list(lowerCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = list(lowerCAmelCase_ )
__UpperCAmelCase : List[Any] = list(lowerCAmelCase_ )
__UpperCAmelCase : Dict = conv_bias
__UpperCAmelCase : Tuple = num_conv_pos_embeddings
__UpperCAmelCase : Dict = num_conv_pos_embedding_groups
__UpperCAmelCase : Optional[Any] = len(self.conv_dim )
__UpperCAmelCase : Any = num_hidden_layers
__UpperCAmelCase : int = intermediate_size
__UpperCAmelCase : List[str] = squeeze_factor
__UpperCAmelCase : Tuple = max_position_embeddings
__UpperCAmelCase : Optional[Any] = position_buckets
__UpperCAmelCase : Union[str, Any] = share_att_key
__UpperCAmelCase : int = relative_attention
__UpperCAmelCase : Union[str, Any] = norm_rel_ebd
__UpperCAmelCase : Dict = list(lowerCAmelCase_ )
__UpperCAmelCase : Dict = hidden_act
__UpperCAmelCase : int = num_attention_heads
__UpperCAmelCase : Union[str, Any] = hidden_dropout
__UpperCAmelCase : List[str] = attention_dropout
__UpperCAmelCase : int = activation_dropout
__UpperCAmelCase : int = feat_proj_dropout
__UpperCAmelCase : List[str] = final_dropout
__UpperCAmelCase : Union[str, Any] = layer_norm_eps
__UpperCAmelCase : Tuple = feature_layer_norm_eps
__UpperCAmelCase : str = initializer_range
__UpperCAmelCase : List[str] = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCAmelCase : List[Any] = apply_spec_augment
__UpperCAmelCase : Optional[Any] = mask_time_prob
__UpperCAmelCase : Union[str, Any] = mask_time_length
__UpperCAmelCase : Union[str, Any] = mask_time_min_masks
__UpperCAmelCase : str = mask_feature_prob
__UpperCAmelCase : Dict = mask_feature_length
__UpperCAmelCase : Tuple = mask_feature_min_masks
# ctc loss
__UpperCAmelCase : List[str] = ctc_loss_reduction
__UpperCAmelCase : str = ctc_zero_infinity
# sequence classification
__UpperCAmelCase : Tuple = use_weighted_layer_sum
__UpperCAmelCase : List[Any] = classifier_proj_size
@property
def _lowerCamelCase ( self: str ) -> Dict:
return functools.reduce(operator.mul , self.conv_stride , 1 )
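# With the default conv_stride above (one 5 and six 2s), this property
# evaluates to 5 * 2**6 = 320, i.e. one output frame per 320 input samples
# (upstream the property is named inputs_to_logits_ratio).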
| 365 | from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
'''configuration_trajectory_transformer''': [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TrajectoryTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrajectoryTransformerModel''',
'''TrajectoryTransformerPreTrainedModel''',
'''load_tf_weights_in_trajectory_transformer''',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
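# A minimal sketch of the lazy-import idea used above (PEP 562 module-level
# __getattr__). This is an illustration only, not the transformers _LazyModule;
# the "math"/"sqrt" mapping is a stand-in example.
import importlib

_lazy_import_structure = {"math": ["sqrt"]}

def __getattr__(name):
    # Resolve the attribute from the mapped submodule on first access.
    for module_name, exported in _lazy_import_structure.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")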
| 342 | 0 |
import collections
import os
import re
from pathlib import Path
_snake_case = 'src/transformers'
# Matches is_xxx_available()
_snake_case = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_snake_case = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_snake_case = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_snake_case = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_snake_case = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_snake_case = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_snake_case = re.compile(r'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_snake_case = re.compile(r'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_snake_case = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_snake_case = re.compile(r'''^\s*try:''')
# Catches a line with else:
_snake_case = re.compile(r'''^\s*else:''')
def _UpperCamelCase ( snake_case__ ) -> Union[str, Any]:
if _re_test_backend.search(lowerCAmelCase__ ) is None:
return None
__UpperCAmelCase : Any = [b[0] for b in _re_backend.findall(lowerCAmelCase__ )]
backends.sort()
return "_and_".join(lowerCAmelCase__ )
def _UpperCamelCase ( snake_case__ ) -> Any:
with open(lowerCAmelCase__, "r", encoding="utf-8", newline="\n" ) as f:
__UpperCAmelCase : Optional[int] = f.readlines()
__UpperCAmelCase : int = 0
while line_index < len(lowerCAmelCase__ ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(lowerCAmelCase__ ):
return None
# First grab the objects without a specific backend in _import_structure
__UpperCAmelCase : Optional[int] = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
__UpperCAmelCase : List[Any] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(lowerCAmelCase__ ):
__UpperCAmelCase : Optional[int] = _re_one_line_import_struct.search(lowerCAmelCase__ ).groups()[0]
__UpperCAmelCase : Union[str, Any] = re.findall(r"\[([^\]]+)\]", lowerCAmelCase__ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
__UpperCAmelCase : int = _re_import_struct_key_value.search(lowerCAmelCase__ )
if single_line_import_search is not None:
__UpperCAmelCase : str = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(lowerCAmelCase__ ) > 0]
objects.extend(lowerCAmelCase__ )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
__UpperCAmelCase : Any = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
__UpperCAmelCase : Optional[int] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__UpperCAmelCase : Tuple = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__UpperCAmelCase : Dict = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
__UpperCAmelCase : int = lines[line_index]
if _re_import_struct_add_one.search(lowerCAmelCase__ ) is not None:
objects.append(_re_import_struct_add_one.search(lowerCAmelCase__ ).groups()[0] )
elif _re_import_struct_add_many.search(lowerCAmelCase__ ) is not None:
__UpperCAmelCase : Optional[Any] = _re_import_struct_add_many.search(lowerCAmelCase__ ).groups()[0].split(", " )
__UpperCAmelCase : int = [obj[1:-1] for obj in imports if len(lowerCAmelCase__ ) > 0]
objects.extend(lowerCAmelCase__ )
elif _re_between_brackets.search(lowerCAmelCase__ ) is not None:
__UpperCAmelCase : Optional[Any] = _re_between_brackets.search(lowerCAmelCase__ ).groups()[0].split(", " )
__UpperCAmelCase : List[Any] = [obj[1:-1] for obj in imports if len(lowerCAmelCase__ ) > 0]
objects.extend(lowerCAmelCase__ )
elif _re_quote_object.search(lowerCAmelCase__ ) is not None:
objects.append(_re_quote_object.search(lowerCAmelCase__ ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 12 + "\"" ):
objects.append(line[13:-3] )
line_index += 1
__UpperCAmelCase : List[Any] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
__UpperCAmelCase : Optional[Any] = []
while (
line_index < len(lowerCAmelCase__ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
__UpperCAmelCase : int = lines[line_index]
__UpperCAmelCase : Any = _re_import.search(lowerCAmelCase__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
__UpperCAmelCase : str = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(lowerCAmelCase__ ):
# If the line is an if is_backend_available, we grab all objects associated.
__UpperCAmelCase : Tuple = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__UpperCAmelCase : Dict = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__UpperCAmelCase : List[str] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
__UpperCAmelCase : int = lines[line_index]
__UpperCAmelCase : Tuple = _re_import.search(lowerCAmelCase__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 12 ):
objects.append(line[12:-2] )
line_index += 1
__UpperCAmelCase : Optional[Any] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Union[str, Any]:
def find_duplicates(snake_case__ ):
return [k for k, v in collections.Counter(lowerCAmelCase__ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
__UpperCAmelCase : str = []
for key in import_dict_objects.keys():
__UpperCAmelCase : Any = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
__UpperCAmelCase : Optional[Any] = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
__UpperCAmelCase : Union[str, Any] = """base imports""" if key == """none""" else f'''{key} backend'''
errors.append(f'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def _UpperCamelCase ( ) -> Any:
__UpperCAmelCase : Tuple = []
for root, _, files in os.walk(lowerCAmelCase__ ):
if "__init__.py" in files:
__UpperCAmelCase : List[Any] = os.path.join(lowerCAmelCase__, "__init__.py" )
__UpperCAmelCase : Any = parse_init(lowerCAmelCase__ )
if objects is not None:
__UpperCAmelCase : Union[str, Any] = analyze_results(*lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
__UpperCAmelCase : int = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append("\n".join(lowerCAmelCase__ ) )
if len(lowerCAmelCase__ ) > 0:
raise ValueError("\n\n".join(lowerCAmelCase__ ) )
def _UpperCamelCase ( ) -> Any:
__UpperCAmelCase : Union[str, Any] = []
for path, directories, files in os.walk(lowerCAmelCase__ ):
for folder in directories:
# Ignore private modules
if folder.startswith("_" ):
directories.remove(lowerCAmelCase__ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(lowerCAmelCase__ ) / folder).glob("*.py" ) ) ) == 0:
continue
__UpperCAmelCase : Any = str((Path(lowerCAmelCase__ ) / folder).relative_to(lowerCAmelCase__ ) )
__UpperCAmelCase : Union[str, Any] = short_path.replace(os.path.sep, "." )
submodules.append(lowerCAmelCase__ )
for fname in files:
if fname == "__init__.py":
continue
__UpperCAmelCase : Optional[Any] = str((Path(lowerCAmelCase__ ) / fname).relative_to(lowerCAmelCase__ ) )
__UpperCAmelCase : Dict = short_path.replace(".py", "" ).replace(os.path.sep, "." )
if len(submodule.split("." ) ) == 1:
submodules.append(lowerCAmelCase__ )
return submodules
_snake_case = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
'models.esm.openfold_utils',
]
def _UpperCamelCase ( ) -> List[str]:
from transformers.utils import direct_transformers_import
__UpperCAmelCase : Union[str, Any] = direct_transformers_import(lowerCAmelCase__ )
__UpperCAmelCase : List[str] = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to collect all additions and
    # (potentially re-)add them.
with open(os.path.join(lowerCAmelCase__, "__init__.py" ), "r" ) as f:
__UpperCAmelCase : Optional[Any] = f.read()
import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", lowerCAmelCase__ ) ) )
__UpperCAmelCase : List[str] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(lowerCAmelCase__ ) > 0:
__UpperCAmelCase : str = """\n""".join(f'''- {module}''' for module in module_not_registered )
raise ValueError(
"The following submodules are not properly registed in the main init of Transformers:\n"
f'''{list_of_modules}\n'''
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 366 | import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
def __init__( self: Tuple , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any]=13 , __lowerCamelCase: Optional[int]=32 , __lowerCamelCase: List[str]=3 , __lowerCamelCase: Dict=4 , __lowerCamelCase: Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase: int=[2, 2, 3, 2] , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Tuple=37 , __lowerCamelCase: Tuple="gelu" , __lowerCamelCase: List[Any]=10 , __lowerCamelCase: Optional[int]=0.02 , __lowerCamelCase: Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase: Optional[int]=[2, 3, 4] , __lowerCamelCase: int=None , ) -> List[str]:
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : Optional[int] = image_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : Union[str, Any] = num_stages
__UpperCAmelCase : List[str] = hidden_sizes
__UpperCAmelCase : Any = depths
__UpperCAmelCase : Optional[int] = is_training
__UpperCAmelCase : List[Any] = use_labels
__UpperCAmelCase : Optional[int] = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Union[str, Any] = num_labels
__UpperCAmelCase : Any = initializer_range
__UpperCAmelCase : List[str] = out_features
__UpperCAmelCase : Tuple = out_indices
__UpperCAmelCase : List[Any] = scope
def _lowerCamelCase ( self: List[Any] ) -> Optional[int]:
__UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : List[str] = None
if self.use_labels:
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self: Tuple ) -> List[Any]:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase : Optional[Any] = ConvNextVaModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : List[str] = model(__lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: Tuple ) -> Tuple:
__UpperCAmelCase : Union[str, Any] = ConvNextVaForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Optional[int] = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self: int , __lowerCamelCase: Any , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Any = model(__lowerCamelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__UpperCAmelCase : List[Any] = None
__UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Any = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowerCamelCase ( self: int ) -> List[str]:
__UpperCAmelCase : int = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs
__UpperCAmelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs
__UpperCAmelCase : Dict = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class _snake_case ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase__: Dict = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__: str = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__: Tuple = False
lowerCamelCase__: int = False
lowerCamelCase__: Dict = False
lowerCamelCase__: int = False
lowerCamelCase__: Any = False
def _lowerCamelCase ( self: Union[str, Any] ) -> Union[str, Any]:
__UpperCAmelCase : Union[str, Any] = ConvNextVaModelTester(self )
__UpperCAmelCase : str = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def _lowerCamelCase ( self: Dict ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self: List[Any] ) -> int:
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def _lowerCamelCase ( self: Any ) -> Any:
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def _lowerCamelCase ( self: str ) -> Optional[Any]:
pass
def _lowerCamelCase ( self: List[Any] ) -> int:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_with_labels()
__UpperCAmelCase : Optional[Any] = True
if model_class.__name__ in [
*get_values(__lowerCamelCase ),
*get_values(__lowerCamelCase ),
]:
continue
__UpperCAmelCase : Optional[Any] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
__UpperCAmelCase : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
__UpperCAmelCase : Any = model(**__lowerCamelCase ).loss
loss.backward()
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = True
if (
model_class.__name__
in [*get_values(__lowerCamelCase ), *get_values(__lowerCamelCase )]
or not model_class.supports_gradient_checkpointing
):
continue
__UpperCAmelCase : int = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.gradient_checkpointing_enable()
model.train()
__UpperCAmelCase : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
__UpperCAmelCase : Any = model(**__lowerCamelCase ).loss
loss.backward()
def _lowerCamelCase ( self: List[str] ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : str = model_class(__lowerCamelCase )
__UpperCAmelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : List[Any] = [*signature.parameters.keys()]
__UpperCAmelCase : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _lowerCamelCase ( self: str ) -> List[Any]:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowerCamelCase ( self: Union[str, Any] ) -> Dict:
def check_hidden_states_output(__lowerCamelCase: Any , __lowerCamelCase: Tuple , __lowerCamelCase: str ):
__UpperCAmelCase : Any = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Tuple = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
__UpperCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[int] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Any = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def _lowerCamelCase ( self: Dict ) -> List[Any]:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Optional[int] = ConvNextVaModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def _UpperCamelCase ( ) -> List[Any]:
__UpperCAmelCase : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
__UpperCAmelCase : List[Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCamelCase )
__UpperCAmelCase : List[str] = self.default_image_processor
__UpperCAmelCase : Optional[Any] = prepare_img()
__UpperCAmelCase : int = preprocessor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
__UpperCAmelCase : str = model(**__lowerCamelCase )
# verify the logits
__UpperCAmelCase : Dict = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
__UpperCAmelCase : str = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
| 342 | 0 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
_snake_case = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class _snake_case ( __SCREAMING_SNAKE_CASE ):
def __init__( self: Optional[Any] , *__lowerCamelCase: Any , **__lowerCamelCase: str ) -> Optional[int]:
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
self.check_model_type(UpperCamelCase__ )
def _lowerCamelCase ( self: Dict , __lowerCamelCase: List[Any]=None , __lowerCamelCase: int=None , __lowerCamelCase: Optional[Any]=None , **__lowerCamelCase: int ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase : Any = {}, {}
if padding is not None:
__UpperCAmelCase : str = padding
if truncation is not None:
__UpperCAmelCase : Optional[int] = truncation
if top_k is not None:
__UpperCAmelCase : Any = top_k
return preprocess_params, {}, postprocess_params
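# Accept a single (image, question) pair or a batch of {"image": ..., "question": ...} dicts.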
def __call__( self: Union[str, Any] , __lowerCamelCase: Union["Image.Image", str] , __lowerCamelCase: str = None , **__lowerCamelCase: Dict ) -> Tuple:
if isinstance(UpperCamelCase__ , (Image.Image, str) ) and isinstance(UpperCamelCase__ , UpperCamelCase__ ):
__UpperCAmelCase : Any = {'''image''': image, '''question''': question}
else:
__UpperCAmelCase : Tuple = image
__UpperCAmelCase : Union[str, Any] = super().__call__(UpperCamelCase__ , **UpperCamelCase__ )
return results
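# Preprocess: load the image and tokenize the question, then merge both into a single dict of model inputs.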
def _lowerCamelCase ( self: str , __lowerCamelCase: str , __lowerCamelCase: Optional[Any]=False , __lowerCamelCase: List[Any]=False ) -> List[str]:
__UpperCAmelCase : Optional[int] = load_image(inputs["image"] )
__UpperCAmelCase : Tuple = self.tokenizer(
inputs["question"] , return_tensors=self.framework , padding=UpperCamelCase__ , truncation=UpperCamelCase__ )
__UpperCAmelCase : Tuple = self.image_processor(images=UpperCamelCase__ , return_tensors=self.framework )
model_inputs.update(UpperCamelCase__ )
return model_inputs
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Optional[int] ) -> Tuple:
__UpperCAmelCase : Any = self.model(**UpperCamelCase__ )
return model_outputs
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[int]=5 ) -> Any:
if top_k > self.model.config.num_labels:
__UpperCAmelCase : List[str] = self.model.config.num_labels
if self.framework == "pt":
__UpperCAmelCase : Optional[Any] = model_outputs.logits.sigmoid()[0]
__UpperCAmelCase : Union[str, Any] = probs.topk(UpperCamelCase__ )
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
__UpperCAmelCase : List[Any] = scores.tolist()
__UpperCAmelCase : Optional[int] = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase__ , UpperCamelCase__ )]
| 367 | import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _snake_case ( _lowercase ):
lowerCamelCase__: str = "detr"
lowerCamelCase__: Dict = ["past_key_values"]
lowerCamelCase__: str = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self: List[str] , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Any=None , __lowerCamelCase: Dict=3 , __lowerCamelCase: str=1_00 , __lowerCamelCase: Union[str, Any]=6 , __lowerCamelCase: Union[str, Any]=20_48 , __lowerCamelCase: Dict=8 , __lowerCamelCase: Optional[int]=6 , __lowerCamelCase: List[Any]=20_48 , __lowerCamelCase: int=8 , __lowerCamelCase: Tuple=0.0 , __lowerCamelCase: Dict=0.0 , __lowerCamelCase: Any=True , __lowerCamelCase: Tuple="relu" , __lowerCamelCase: Tuple=2_56 , __lowerCamelCase: Dict=0.1 , __lowerCamelCase: Union[str, Any]=0.0 , __lowerCamelCase: Optional[int]=0.0 , __lowerCamelCase: Union[str, Any]=0.02 , __lowerCamelCase: str=1.0 , __lowerCamelCase: List[str]=False , __lowerCamelCase: Dict="sine" , __lowerCamelCase: Optional[int]="resnet50" , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: int=False , __lowerCamelCase: Union[str, Any]=1 , __lowerCamelCase: Tuple=5 , __lowerCamelCase: int=2 , __lowerCamelCase: Dict=1 , __lowerCamelCase: Dict=1 , __lowerCamelCase: Union[str, Any]=5 , __lowerCamelCase: Dict=2 , __lowerCamelCase: int=0.1 , **__lowerCamelCase: str , ) -> int:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__UpperCAmelCase : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : List[Any] = backbone_config.get("model_type" )
__UpperCAmelCase : List[str] = CONFIG_MAPPING[backbone_model_type]
__UpperCAmelCase : List[str] = config_class.from_dict(__lowerCamelCase )
# set timm attributes to None
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = None, None, None
__UpperCAmelCase : Any = use_timm_backbone
__UpperCAmelCase : Optional[Any] = backbone_config
__UpperCAmelCase : Optional[Any] = num_channels
__UpperCAmelCase : List[Any] = num_queries
__UpperCAmelCase : Optional[int] = d_model
__UpperCAmelCase : Optional[Any] = encoder_ffn_dim
__UpperCAmelCase : Dict = encoder_layers
__UpperCAmelCase : List[Any] = encoder_attention_heads
__UpperCAmelCase : int = decoder_ffn_dim
__UpperCAmelCase : Tuple = decoder_layers
__UpperCAmelCase : int = decoder_attention_heads
__UpperCAmelCase : List[Any] = dropout
__UpperCAmelCase : Dict = attention_dropout
__UpperCAmelCase : Optional[Any] = activation_dropout
__UpperCAmelCase : int = activation_function
__UpperCAmelCase : Any = init_std
__UpperCAmelCase : str = init_xavier_std
__UpperCAmelCase : int = encoder_layerdrop
__UpperCAmelCase : Tuple = decoder_layerdrop
__UpperCAmelCase : List[Any] = encoder_layers
__UpperCAmelCase : Optional[Any] = auxiliary_loss
__UpperCAmelCase : int = position_embedding_type
__UpperCAmelCase : Optional[int] = backbone
__UpperCAmelCase : str = use_pretrained_backbone
__UpperCAmelCase : Dict = dilation
# Hungarian matcher
__UpperCAmelCase : Optional[int] = class_cost
__UpperCAmelCase : Optional[Any] = bbox_cost
__UpperCAmelCase : Optional[int] = giou_cost
# Loss coefficients
__UpperCAmelCase : Any = mask_loss_coefficient
__UpperCAmelCase : Any = dice_loss_coefficient
__UpperCAmelCase : Any = bbox_loss_coefficient
__UpperCAmelCase : Optional[int] = giou_loss_coefficient
__UpperCAmelCase : Optional[Any] = eos_coefficient
super().__init__(is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase )
@property
def _lowerCamelCase ( self: Dict ) -> int:
return self.encoder_attention_heads
@property
def _lowerCamelCase ( self: str ) -> int:
return self.d_model
@classmethod
def _lowerCamelCase ( cls: Optional[int] , __lowerCamelCase: PretrainedConfig , **__lowerCamelCase: List[Any] ) -> List[Any]:
return cls(backbone_config=__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( self: str ) -> Dict[str, any]:
__UpperCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
__UpperCAmelCase : int = self.backbone_config.to_dict()
__UpperCAmelCase : List[str] = self.__class__.model_type
return output
class _snake_case ( _lowercase ):
lowerCamelCase__: Optional[int] = version.parse("1.11" )
@property
def _lowerCamelCase ( self: Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _lowerCamelCase ( self: Optional[Any] ) -> float:
return 1e-5
@property
def _lowerCamelCase ( self: List[str] ) -> int:
return 12
| 342 | 0 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class _snake_case ( unittest.TestCase ):
def __init__( self: str , __lowerCamelCase: str , __lowerCamelCase: Optional[Any]=13 , __lowerCamelCase: Any=30 , __lowerCamelCase: Any=2 , __lowerCamelCase: Union[str, Any]=3 , __lowerCamelCase: Tuple=True , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: Any=32 , __lowerCamelCase: int=5 , __lowerCamelCase: Dict=4 , __lowerCamelCase: List[Any]=37 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: str=0.1 , __lowerCamelCase: Tuple=0.1 , __lowerCamelCase: int=10 , __lowerCamelCase: str=0.02 , ) -> Optional[Any]:
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : List[Any] = batch_size
__UpperCAmelCase : Any = image_size
__UpperCAmelCase : str = patch_size
__UpperCAmelCase : List[Any] = num_channels
__UpperCAmelCase : Any = is_training
__UpperCAmelCase : int = use_labels
__UpperCAmelCase : str = hidden_size
__UpperCAmelCase : Union[str, Any] = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : str = intermediate_size
__UpperCAmelCase : Tuple = hidden_act
__UpperCAmelCase : Optional[int] = hidden_dropout_prob
__UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
__UpperCAmelCase : Union[str, Any] = type_sequence_label_size
__UpperCAmelCase : Optional[int] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__UpperCAmelCase : List[Any] = (image_size // patch_size) ** 2
__UpperCAmelCase : Any = num_patches + 1
def _lowerCamelCase ( self: List[str] ) -> Optional[int]:
__UpperCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : Any = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , )
return config, pixel_values
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Dict , __lowerCamelCase: Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase : Union[str, Any] = FlaxViTModel(config=__lowerCamelCase )
__UpperCAmelCase : List[str] = model(__lowerCamelCase )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
__UpperCAmelCase : Tuple = (self.image_size, self.image_size)
__UpperCAmelCase : Optional[Any] = (self.patch_size, self.patch_size)
__UpperCAmelCase : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Any , __lowerCamelCase: Optional[int] ) -> Dict:
__UpperCAmelCase : Union[str, Any] = self.type_sequence_label_size
__UpperCAmelCase : List[Any] = FlaxViTForImageClassification(config=__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__UpperCAmelCase : List[Any] = 1
__UpperCAmelCase : int = FlaxViTForImageClassification(__lowerCamelCase )
__UpperCAmelCase : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCAmelCase : str = model(__lowerCamelCase )
def _lowerCamelCase ( self: Union[str, Any] ) -> List[str]:
__UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase : Dict = config_and_inputs
__UpperCAmelCase : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class _snake_case ( _a , unittest.TestCase ):
lowerCamelCase__: Any = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def _lowerCamelCase ( self: List[Any] ) -> None:
__UpperCAmelCase : int = FlaxViTModelTester(self )
__UpperCAmelCase : List[Any] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def _lowerCamelCase ( self: Union[str, Any] ) -> Optional[int]:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self: str ) -> Optional[Any]:
__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowerCamelCase ( self: List[str] ) -> Optional[int]:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
def _lowerCamelCase ( self: Tuple ) -> Union[str, Any]:
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[Any] = model_class(__lowerCamelCase )
__UpperCAmelCase : int = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : str = [*signature.parameters.keys()]
__UpperCAmelCase : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _lowerCamelCase ( self: Any ) -> Optional[Any]:
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCAmelCase : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : str = model_class(__lowerCamelCase )
@jax.jit
def model_jitted(__lowerCamelCase: List[Any] , **__lowerCamelCase: int ):
return model(pixel_values=__lowerCamelCase , **__lowerCamelCase )
with self.subTest("JIT Enabled" ):
__UpperCAmelCase : Optional[int] = model_jitted(**__lowerCamelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__UpperCAmelCase : Optional[Any] = model_jitted(**__lowerCamelCase ).to_tuple()
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
for jitted_output, output in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _lowerCamelCase ( self: List[str] ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
__UpperCAmelCase : Any = model_class_name.from_pretrained("google/vit-base-patch16-224" )
__UpperCAmelCase : Union[str, Any] = model(np.ones((1, 3, 2_24, 2_24) ) )
self.assertIsNotNone(__lowerCamelCase )
| 368 | from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
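# Pairwise cosine similarity between two batches of embeddings: L2-normalize each batch, then take the dot product.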
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=1e-1_2 ) -> str:
__UpperCAmelCase : Any = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(snake_case__, axis=1 ), a_min=snake_case__ ) ).T
__UpperCAmelCase : int = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(snake_case__, axis=1 ), a_min=snake_case__ ) ).T
return jnp.matmul(snake_case__, norm_emb_a.T )
class _snake_case ( nn.Module ):
lowerCamelCase__: CLIPConfig
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: Any ) -> Tuple:
__UpperCAmelCase : List[str] = FlaxCLIPVisionModule(self.config.vision_config )
__UpperCAmelCase : Any = nn.Dense(self.config.projection_dim , use_bias=__lowerCamelCase , dtype=self.dtype )
__UpperCAmelCase : int = self.param("concept_embeds" , jax.nn.initializers.ones , (17, self.config.projection_dim) )
__UpperCAmelCase : int = self.param(
"special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim) )
__UpperCAmelCase : Tuple = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (17,) )
__UpperCAmelCase : str = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,) )
def __call__( self: List[Any] , __lowerCamelCase: Dict ) -> Dict:
__UpperCAmelCase : Optional[int] = self.vision_model(__lowerCamelCase )[1]
__UpperCAmelCase : List[str] = self.visual_projection(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = jax_cosine_distance(__lowerCamelCase , self.special_care_embeds )
__UpperCAmelCase : Optional[Any] = jax_cosine_distance(__lowerCamelCase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
__UpperCAmelCase : List[str] = 0.0
__UpperCAmelCase : Tuple = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
__UpperCAmelCase : List[str] = jnp.round(__lowerCamelCase , 3 )
__UpperCAmelCase : Any = jnp.any(special_scores > 0 , axis=1 , keepdims=__lowerCamelCase )
# Use a lower threshold if an image has any special care concept
__UpperCAmelCase : List[Any] = is_special_care * 0.01
__UpperCAmelCase : Any = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
__UpperCAmelCase : List[str] = jnp.round(__lowerCamelCase , 3 )
__UpperCAmelCase : Any = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class _snake_case ( _lowercase ):
lowerCamelCase__: int = CLIPConfig
lowerCamelCase__: Tuple = "clip_input"
lowerCamelCase__: str = FlaxStableDiffusionSafetyCheckerModule
def __init__( self: Union[str, Any] , __lowerCamelCase: CLIPConfig , __lowerCamelCase: Optional[Tuple] = None , __lowerCamelCase: int = 0 , __lowerCamelCase: jnp.dtype = jnp.floataa , __lowerCamelCase: bool = True , **__lowerCamelCase: Optional[int] , ) -> int:
if input_shape is None:
__UpperCAmelCase : Dict = (1, 2_24, 2_24, 3)
__UpperCAmelCase : Tuple = self.module_class(config=__lowerCamelCase , dtype=__lowerCamelCase , **__lowerCamelCase )
super().__init__(__lowerCamelCase , __lowerCamelCase , input_shape=__lowerCamelCase , seed=__lowerCamelCase , dtype=__lowerCamelCase , _do_init=__lowerCamelCase )
def _lowerCamelCase ( self: Dict , __lowerCamelCase: jax.random.KeyArray , __lowerCamelCase: Tuple , __lowerCamelCase: FrozenDict = None ) -> FrozenDict:
# init input tensor
__UpperCAmelCase : Tuple = jax.random.normal(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase , __UpperCAmelCase : Dict = jax.random.split(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = {"params": params_rng, "dropout": dropout_rng}
__UpperCAmelCase : str = self.module.init(__lowerCamelCase , __lowerCamelCase )["params"]
return random_params
def __call__( self: Union[str, Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: dict = None , ) -> List[Any]:
__UpperCAmelCase : int = jnp.transpose(__lowerCamelCase , (0, 2, 3, 1) )
return self.module.apply(
{"params": params or self.params} , jnp.array(__lowerCamelCase , dtype=jnp.floataa ) , rngs={} , )
| 342 | 0 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
def __init__( self: Any , __lowerCamelCase: Tuple , __lowerCamelCase: str=13 , __lowerCamelCase: Optional[int]=32 , __lowerCamelCase: Any=2 , __lowerCamelCase: Dict=3 , __lowerCamelCase: Optional[Any]=16 , __lowerCamelCase: List[str]=[32, 64, 1_28] , __lowerCamelCase: str=[1, 2, 1] , __lowerCamelCase: Optional[Any]=[2, 2, 4] , __lowerCamelCase: Tuple=2 , __lowerCamelCase: List[Any]=2.0 , __lowerCamelCase: Tuple=True , __lowerCamelCase: Optional[int]=0.0 , __lowerCamelCase: str=0.0 , __lowerCamelCase: str=0.1 , __lowerCamelCase: str="gelu" , __lowerCamelCase: Union[str, Any]=False , __lowerCamelCase: Tuple=True , __lowerCamelCase: Union[str, Any]=0.02 , __lowerCamelCase: Optional[Any]=1e-5 , __lowerCamelCase: str=True , __lowerCamelCase: Union[str, Any]=None , __lowerCamelCase: int=True , __lowerCamelCase: List[str]=10 , __lowerCamelCase: Dict=8 , __lowerCamelCase: Optional[int]=["stage1", "stage2"] , __lowerCamelCase: Union[str, Any]=[1, 2] , ) -> Union[str, Any]:
__UpperCAmelCase : str = parent
__UpperCAmelCase : List[Any] = batch_size
__UpperCAmelCase : Optional[Any] = image_size
__UpperCAmelCase : Tuple = patch_size
__UpperCAmelCase : List[Any] = num_channels
__UpperCAmelCase : Dict = embed_dim
__UpperCAmelCase : int = hidden_sizes
__UpperCAmelCase : Union[str, Any] = depths
__UpperCAmelCase : List[Any] = num_heads
__UpperCAmelCase : Optional[Any] = window_size
__UpperCAmelCase : Optional[int] = mlp_ratio
__UpperCAmelCase : Union[str, Any] = qkv_bias
__UpperCAmelCase : str = hidden_dropout_prob
__UpperCAmelCase : List[Any] = attention_probs_dropout_prob
__UpperCAmelCase : Any = drop_path_rate
__UpperCAmelCase : int = hidden_act
__UpperCAmelCase : int = use_absolute_embeddings
__UpperCAmelCase : List[str] = patch_norm
__UpperCAmelCase : Any = layer_norm_eps
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : Union[str, Any] = is_training
__UpperCAmelCase : Optional[int] = scope
__UpperCAmelCase : Union[str, Any] = use_labels
__UpperCAmelCase : Union[str, Any] = type_sequence_label_size
__UpperCAmelCase : Tuple = encoder_stride
__UpperCAmelCase : Tuple = out_features
__UpperCAmelCase : List[Any] = out_indices
def _lowerCamelCase ( self: Tuple ) -> str:
__UpperCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : List[Any] = None
if self.use_labels:
__UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : List[Any] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self: int ) -> Dict:
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: str , __lowerCamelCase: str , __lowerCamelCase: Optional[Any] ) -> Dict:
__UpperCAmelCase : str = FocalNetModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__UpperCAmelCase : int = model(UpperCamelCase__ )
__UpperCAmelCase : Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__UpperCAmelCase : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: int , __lowerCamelCase: Dict ) -> Dict:
__UpperCAmelCase : Optional[Any] = FocalNetBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__UpperCAmelCase : str = model(UpperCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__UpperCAmelCase : List[Any] = None
__UpperCAmelCase : List[Any] = FocalNetBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__UpperCAmelCase : Tuple = model(UpperCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Any , __lowerCamelCase: List[str] , __lowerCamelCase: List[Any] ) -> int:
__UpperCAmelCase : Optional[int] = FocalNetForMaskedImageModeling(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__UpperCAmelCase : int = model(UpperCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__UpperCAmelCase : Union[str, Any] = 1
__UpperCAmelCase : str = FocalNetForMaskedImageModeling(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__UpperCAmelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCAmelCase : Tuple = model(UpperCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: Dict , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Union[str, Any] ) -> Any:
__UpperCAmelCase : Dict = self.type_sequence_label_size
__UpperCAmelCase : Optional[Any] = FocalNetForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__UpperCAmelCase : List[str] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__UpperCAmelCase : List[Any] = 1
__UpperCAmelCase : Optional[Any] = FocalNetForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__UpperCAmelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCAmelCase : Dict = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self: Dict ) -> Optional[int]:
__UpperCAmelCase : Dict = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = config_and_inputs
__UpperCAmelCase : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( __a , __a , unittest.TestCase ):
lowerCamelCase__: Any = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__: str = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__: Union[str, Any] = False
lowerCamelCase__: Union[str, Any] = False
lowerCamelCase__: int = False
lowerCamelCase__: str = False
lowerCamelCase__: Dict = False
def _lowerCamelCase ( self: int ) -> List[str]:
__UpperCAmelCase : Tuple = FocalNetModelTester(self )
__UpperCAmelCase : Dict = ConfigTester(self , config_class=UpperCamelCase__ , embed_dim=37 , has_text_modality=UpperCamelCase__ )
def _lowerCamelCase ( self: Tuple ) -> Optional[int]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self: str ) -> int:
return
def _lowerCamelCase ( self: Any ) -> Optional[int]:
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def _lowerCamelCase ( self: str ) -> Any:
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCamelCase__ )
def _lowerCamelCase ( self: Dict ) -> Optional[Any]:
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ )
def _lowerCamelCase ( self: Dict ) -> List[str]:
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def _lowerCamelCase ( self: Tuple ) -> List[Any]:
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def _lowerCamelCase ( self: Dict ) -> Any:
pass
def _lowerCamelCase ( self: List[str] ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__UpperCAmelCase : str = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def _lowerCamelCase ( self: str ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__UpperCAmelCase : Union[str, Any] = model_class(UpperCamelCase__ )
__UpperCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Dict = [*signature.parameters.keys()]
__UpperCAmelCase : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: Any , __lowerCamelCase: Any , __lowerCamelCase: Dict , __lowerCamelCase: Union[str, Any] ) -> Dict:
__UpperCAmelCase : Optional[int] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
__UpperCAmelCase : List[Any] = outputs.hidden_states
__UpperCAmelCase : List[str] = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# FocalNet has a different seq_length
__UpperCAmelCase : int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__UpperCAmelCase : str = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[str] = reshaped_hidden_states[0].shape
__UpperCAmelCase : Any = (
reshaped_hidden_states[0].view(UpperCamelCase__ , UpperCamelCase__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _lowerCamelCase ( self: Dict ) -> int:
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
__UpperCAmelCase : Tuple = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Optional[int] = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def _lowerCamelCase ( self: Union[str, Any] ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[str] = 3
__UpperCAmelCase : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__UpperCAmelCase : Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase : Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__UpperCAmelCase : List[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
__UpperCAmelCase : Union[str, Any] = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Any = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )
@slow
def _lowerCamelCase ( self: Union[str, Any] ) -> List[Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : int = FocalNetModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def _lowerCamelCase ( self: int ) -> Any:
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Any = _config_zero_init(UpperCamelCase__ )
for model_class in self.all_model_classes:
__UpperCAmelCase : Dict = model_class(config=UpperCamelCase__ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self: str ) -> Dict:
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self: List[Any] ) -> str:
__UpperCAmelCase : Any = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(UpperCamelCase__ )
__UpperCAmelCase : int = self.default_image_processor
__UpperCAmelCase : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__UpperCAmelCase : List[str] = image_processor(images=UpperCamelCase__ , return_tensors="pt" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
__UpperCAmelCase : Optional[int] = model(**UpperCamelCase__ )
# verify the logits
__UpperCAmelCase : List[str] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
__UpperCAmelCase : Any = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 2_81 )
@require_torch
class _snake_case ( __a , unittest.TestCase ):
lowerCamelCase__: List[str] = (FocalNetBackbone,) if is_torch_available() else ()
lowerCamelCase__: Optional[Any] = FocalNetConfig
lowerCamelCase__: Tuple = False
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
__UpperCAmelCase : List[Any] = FocalNetModelTester(self )
| 369 | import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
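# Build the UperNet + ConvNeXt config (depths, hidden sizes, ADE20k labels) for the given checkpoint name.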
def _UpperCamelCase ( snake_case__ ) -> Tuple:
__UpperCAmelCase : Union[str, Any] = 384
if "tiny" in model_name:
__UpperCAmelCase : Union[str, Any] = [3, 3, 9, 3]
__UpperCAmelCase : List[Any] = [96, 192, 384, 768]
if "small" in model_name:
__UpperCAmelCase : Tuple = [3, 3, 27, 3]
__UpperCAmelCase : Any = [96, 192, 384, 768]
if "base" in model_name:
__UpperCAmelCase : str = [3, 3, 27, 3]
__UpperCAmelCase : str = [128, 256, 512, 1024]
__UpperCAmelCase : str = 512
if "large" in model_name:
__UpperCAmelCase : Dict = [3, 3, 27, 3]
__UpperCAmelCase : int = [192, 384, 768, 1536]
__UpperCAmelCase : Dict = 768
if "xlarge" in model_name:
__UpperCAmelCase : List[Any] = [3, 3, 27, 3]
__UpperCAmelCase : Tuple = [256, 512, 1024, 2048]
__UpperCAmelCase : int = 1024
# set label information
__UpperCAmelCase : List[Any] = 150
__UpperCAmelCase : str = "huggingface/label-files"
__UpperCAmelCase : List[Any] = "ade20k-id2label.json"
__UpperCAmelCase : str = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) )
__UpperCAmelCase : str = {int(k ): v for k, v in idalabel.items()}
__UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()}
__UpperCAmelCase : int = ConvNextConfig(
depths=snake_case__, hidden_sizes=snake_case__, out_features=["stage1", "stage2", "stage3", "stage4"] )
__UpperCAmelCase : int = UperNetConfig(
backbone_config=snake_case__, auxiliary_in_channels=snake_case__, num_labels=snake_case__, idalabel=snake_case__, labelaid=snake_case__, )
return config
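# Enumerate (old, new) key pairs that map the original mmsegmentation checkpoint names to the HF implementation.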
def _UpperCamelCase ( snake_case__ ) -> Tuple:
__UpperCAmelCase : Optional[int] = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
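# Rename a single key in the state dict in place: pop the old name and re-insert the value under the new one.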
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Any:
__UpperCAmelCase : Union[str, Any] = dct.pop(snake_case__ )
__UpperCAmelCase : Optional[int] = val
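# Download the original checkpoint, remap its keys, load it into the HF model, and verify the logits on a sample ADE20k image.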
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Union[str, Any]:
__UpperCAmelCase : Dict = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
__UpperCAmelCase : Union[str, Any] = model_name_to_url[model_name]
__UpperCAmelCase : str = torch.hub.load_state_dict_from_url(snake_case__, map_location="cpu" )["state_dict"]
__UpperCAmelCase : Dict = get_upernet_config(snake_case__ )
__UpperCAmelCase : str = UperNetForSemanticSegmentation(snake_case__ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
__UpperCAmelCase : str = state_dict.pop(snake_case__ )
if "bn" in key:
__UpperCAmelCase : int = key.replace("bn", "batch_norm" )
__UpperCAmelCase : Union[str, Any] = val
# rename keys
__UpperCAmelCase : Optional[Any] = create_rename_keys(snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__, snake_case__, snake_case__ )
model.load_state_dict(snake_case__ )
# verify on image
__UpperCAmelCase : int = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
__UpperCAmelCase : Optional[int] = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw ).convert("RGB" )
__UpperCAmelCase : str = SegformerImageProcessor()
__UpperCAmelCase : Any = processor(snake_case__, return_tensors="pt" ).pixel_values
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(snake_case__ )
if model_name == "upernet-convnext-tiny":
__UpperCAmelCase : Any = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
__UpperCAmelCase : Optional[Any] = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
__UpperCAmelCase : Dict = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
__UpperCAmelCase : Tuple = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
__UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print("Logits:", outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3], snake_case__, atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case__ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(snake_case__ )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[F'upernet-convnext-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_snake_case = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 342 | 0 |
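# --- Editor's note: a minimal, self-contained sketch of the state-dict
# renaming pattern used by the conversion script above (pop the old key,
# re-insert its value under the new key). The two-entry dict is illustrative.
def demo_rename_key(dct, old, new):
    dct[new] = dct.pop(old)

demo_state = {"backbone.norm0.weight": 0.1, "decode_head.conv_seg.bias": 0.2}
demo_rename_key(demo_state, "decode_head.conv_seg.bias", "decode_head.classifier.bias")
assert "decode_head.classifier.bias" in demo_state
assert "decode_head.conv_seg.bias" not in demo_state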
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
_snake_case = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def _UpperCamelCase ( snake_case__, snake_case__=None ) -> List[Any]:
require_version(deps[pkg], snake_case_ )
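# --- Editor's note: `require_version_core` above compares an installed
# package against a spec string such as "tqdm>=4.27". A minimal equivalent
# (a sketch, assuming Python >= 3.8 and the `packaging` library):
from importlib.metadata import version as _installed_version
from packaging.requirements import Requirement

def check_spec(spec: str) -> None:
    req = Requirement(spec)
    got = _installed_version(req.name)
    if not req.specifier.contains(got, prereleases=True):
        raise ImportError(f"{req.name}=={got} does not satisfy '{spec}'")

check_spec("packaging>=20.0")  # packaging must exist for the import above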
| 370 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class _snake_case ( _lowercase ):
lowerCamelCase__: Dict = "roc_bert"
def __init__( self: int , __lowerCamelCase: Union[str, Any]=3_05_22 , __lowerCamelCase: int=7_68 , __lowerCamelCase: Any=12 , __lowerCamelCase: int=12 , __lowerCamelCase: Union[str, Any]=30_72 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: Optional[int]=0.1 , __lowerCamelCase: str=0.1 , __lowerCamelCase: Any=5_12 , __lowerCamelCase: Union[str, Any]=2 , __lowerCamelCase: str=0.02 , __lowerCamelCase: int=1e-12 , __lowerCamelCase: str=True , __lowerCamelCase: int=0 , __lowerCamelCase: List[str]="absolute" , __lowerCamelCase: List[Any]=None , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: Dict=7_68 , __lowerCamelCase: Optional[int]=9_10 , __lowerCamelCase: Union[str, Any]=5_12 , __lowerCamelCase: int=2_48_58 , __lowerCamelCase: Optional[int]=True , **__lowerCamelCase: Any , ) -> List[Any]:
__UpperCAmelCase : str = vocab_size
__UpperCAmelCase : Dict = max_position_embeddings
__UpperCAmelCase : Optional[Any] = hidden_size
__UpperCAmelCase : Optional[int] = num_hidden_layers
__UpperCAmelCase : Union[str, Any] = num_attention_heads
__UpperCAmelCase : List[str] = intermediate_size
__UpperCAmelCase : List[Any] = hidden_act
__UpperCAmelCase : List[str] = hidden_dropout_prob
__UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : Optional[Any] = type_vocab_size
__UpperCAmelCase : List[Any] = layer_norm_eps
__UpperCAmelCase : Optional[int] = use_cache
__UpperCAmelCase : Optional[Any] = enable_pronunciation
__UpperCAmelCase : Any = enable_shape
__UpperCAmelCase : Union[str, Any] = pronunciation_embed_dim
__UpperCAmelCase : Optional[Any] = pronunciation_vocab_size
__UpperCAmelCase : Optional[Any] = shape_embed_dim
__UpperCAmelCase : List[Any] = shape_vocab_size
__UpperCAmelCase : int = concat_input
__UpperCAmelCase : int = position_embedding_type
__UpperCAmelCase : Optional[int] = classifier_dropout
super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase )
| 342 | 0 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ = 10**-10 ) -> Dict:
__UpperCAmelCase : List[str] = a
while True:
__UpperCAmelCase : Union[str, Any] = Decimal(a__ ) - (
Decimal(eval(a__ ) ) / Decimal(eval(str(diff(a__ ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(a__ ) ) < precision: # noqa: S307
return float(a__ )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
print(F'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
# Find Square Root of 5
print(F'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
# Exponential Roots
print(F'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
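# --- Editor's note: the snippet above applies the Newton-Raphson update
# x_{n+1} = x_n - f(x_n) / f'(x_n) via sympy and eval. A plain-float variant
# with an explicit derivative (illustrative sketch, not the author's API):
def newton_sketch(f, df, x0, tol=1e-10, max_iter=100):
    x = x0
    for _ in range(max_iter):
        step = f(x) / df(x)
        x -= step
        if abs(step) < tol:
            return x
    raise RuntimeError("Newton-Raphson did not converge")

assert abs(newton_sketch(lambda x: x * x - 2, lambda x: 2 * x, 1.0) - 2 ** 0.5) < 1e-9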
| 371 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def _UpperCamelCase ( snake_case__ ) -> int:
__UpperCAmelCase : int = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
__UpperCAmelCase : int = [144, 192, 240]
__UpperCAmelCase : Optional[Any] = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
__UpperCAmelCase : Optional[Any] = [96, 120, 144]
__UpperCAmelCase : Tuple = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
__UpperCAmelCase : str = [64, 80, 96]
__UpperCAmelCase : Optional[Any] = [16, 16, 24, 48, 64, 80, 320]
__UpperCAmelCase : Tuple = 0.05
__UpperCAmelCase : Dict = 2.0
if mobilevit_name.startswith("deeplabv3_" ):
__UpperCAmelCase : str = 512
__UpperCAmelCase : Any = 16
__UpperCAmelCase : str = 21
__UpperCAmelCase : Union[str, Any] = "pascal-voc-id2label.json"
else:
__UpperCAmelCase : Optional[Any] = 1000
__UpperCAmelCase : int = "imagenet-1k-id2label.json"
__UpperCAmelCase : Dict = "huggingface/label-files"
__UpperCAmelCase : int = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) )
__UpperCAmelCase : Any = {int(snake_case__ ): v for k, v in idalabel.items()}
__UpperCAmelCase : int = idalabel
__UpperCAmelCase : List[str] = {v: k for k, v in idalabel.items()}
return config
def _UpperCamelCase ( snake_case__, snake_case__=False ) -> Tuple:
for i in range(1, 6 ):
if f'''layer_{i}.''' in name:
__UpperCAmelCase : Tuple = name.replace(f'''layer_{i}.''', f'''encoder.layer.{i - 1}.''' )
if "conv_1." in name:
__UpperCAmelCase : Dict = name.replace("conv_1.", "conv_stem." )
if ".block." in name:
__UpperCAmelCase : Optional[int] = name.replace(".block.", "." )
if "exp_1x1" in name:
__UpperCAmelCase : Tuple = name.replace("exp_1x1", "expand_1x1" )
if "red_1x1" in name:
__UpperCAmelCase : Optional[Any] = name.replace("red_1x1", "reduce_1x1" )
if ".local_rep.conv_3x3." in name:
__UpperCAmelCase : Optional[int] = name.replace(".local_rep.conv_3x3.", ".conv_kxk." )
if ".local_rep.conv_1x1." in name:
__UpperCAmelCase : Any = name.replace(".local_rep.conv_1x1.", ".conv_1x1." )
if ".norm." in name:
__UpperCAmelCase : Dict = name.replace(".norm.", ".normalization." )
if ".conv." in name:
__UpperCAmelCase : List[Any] = name.replace(".conv.", ".convolution." )
if ".conv_proj." in name:
__UpperCAmelCase : List[str] = name.replace(".conv_proj.", ".conv_projection." )
for i in range(0, 2 ):
for j in range(0, 4 ):
if f'''.{i}.{j}.''' in name:
__UpperCAmelCase : List[Any] = name.replace(f'''.{i}.{j}.''', f'''.{i}.layer.{j}.''' )
for i in range(2, 6 ):
for j in range(0, 4 ):
if f'''.{i}.{j}.''' in name:
__UpperCAmelCase : Any = name.replace(f'''.{i}.{j}.''', f'''.{i}.''' )
if "expand_1x1" in name:
__UpperCAmelCase : Optional[int] = name.replace("expand_1x1", "downsampling_layer.expand_1x1" )
if "conv_3x3" in name:
__UpperCAmelCase : List[Any] = name.replace("conv_3x3", "downsampling_layer.conv_3x3" )
if "reduce_1x1" in name:
__UpperCAmelCase : Dict = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1" )
for i in range(2, 5 ):
if f'''.global_rep.{i}.weight''' in name:
__UpperCAmelCase : Any = name.replace(f'''.global_rep.{i}.weight''', ".layernorm.weight" )
if f'''.global_rep.{i}.bias''' in name:
__UpperCAmelCase : Optional[Any] = name.replace(f'''.global_rep.{i}.bias''', ".layernorm.bias" )
if ".global_rep." in name:
__UpperCAmelCase : Tuple = name.replace(".global_rep.", ".transformer." )
if ".pre_norm_mha.0." in name:
__UpperCAmelCase : Optional[Any] = name.replace(".pre_norm_mha.0.", ".layernorm_before." )
if ".pre_norm_mha.1.out_proj." in name:
__UpperCAmelCase : Tuple = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense." )
if ".pre_norm_ffn.0." in name:
__UpperCAmelCase : Optional[Any] = name.replace(".pre_norm_ffn.0.", ".layernorm_after." )
if ".pre_norm_ffn.1." in name:
__UpperCAmelCase : Dict = name.replace(".pre_norm_ffn.1.", ".intermediate.dense." )
if ".pre_norm_ffn.4." in name:
__UpperCAmelCase : int = name.replace(".pre_norm_ffn.4.", ".output.dense." )
if ".transformer." in name:
__UpperCAmelCase : Tuple = name.replace(".transformer.", ".transformer.layer." )
if ".aspp_layer." in name:
__UpperCAmelCase : Any = name.replace(".aspp_layer.", "." )
if ".aspp_pool." in name:
__UpperCAmelCase : Optional[Any] = name.replace(".aspp_pool.", "." )
if "seg_head." in name:
__UpperCAmelCase : Optional[int] = name.replace("seg_head.", "segmentation_head." )
if "segmentation_head.classifier.classifier." in name:
__UpperCAmelCase : str = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier." )
if "classifier.fc." in name:
__UpperCAmelCase : Optional[Any] = name.replace("classifier.fc.", "classifier." )
elif (not base_model) and ("segmentation_head." not in name):
__UpperCAmelCase : List[str] = "mobilevit." + name
return name
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=False ) -> Union[str, Any]:
if base_model:
__UpperCAmelCase : Optional[int] = ""
else:
__UpperCAmelCase : Tuple = "mobilevit."
for key in orig_state_dict.copy().keys():
__UpperCAmelCase : Optional[int] = orig_state_dict.pop(snake_case__ )
if key[:8] == "encoder.":
__UpperCAmelCase : str = key[8:]
if "qkv" in key:
__UpperCAmelCase : Tuple = key.split("." )
__UpperCAmelCase : List[Any] = int(key_split[0][6:] ) - 1
__UpperCAmelCase : Optional[Any] = int(key_split[3] )
__UpperCAmelCase : Tuple = model.get_submodule(f'''{model_prefix}encoder.layer.{layer_num}''' )
__UpperCAmelCase : List[str] = layer.transformer.layer[transformer_num].attention.attention.all_head_size
__UpperCAmelCase : Optional[Any] = (
f'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'''
)
if "weight" in key:
__UpperCAmelCase : Any = val[:dim, :]
__UpperCAmelCase : Any = val[dim : dim * 2, :]
__UpperCAmelCase : List[Any] = val[-dim:, :]
else:
__UpperCAmelCase : List[str] = val[:dim]
__UpperCAmelCase : Optional[Any] = val[dim : dim * 2]
__UpperCAmelCase : List[Any] = val[-dim:]
else:
__UpperCAmelCase : str = val
return orig_state_dict
def _UpperCamelCase ( ) -> Any:
__UpperCAmelCase : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
__UpperCAmelCase : List[str] = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw )
return im
@torch.no_grad()
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__=False ) -> Optional[Any]:
__UpperCAmelCase : Tuple = get_mobilevit_config(snake_case__ )
# load original state_dict
__UpperCAmelCase : str = torch.load(snake_case__, map_location="cpu" )
# load 🤗 model
if mobilevit_name.startswith("deeplabv3_" ):
__UpperCAmelCase : Optional[int] = MobileViTForSemanticSegmentation(snake_case__ ).eval()
else:
__UpperCAmelCase : List[Any] = MobileViTForImageClassification(snake_case__ ).eval()
__UpperCAmelCase : Dict = convert_state_dict(snake_case__, snake_case__ )
model.load_state_dict(snake_case__ )
# Check outputs on an image, prepared by MobileViTImageProcessor
__UpperCAmelCase : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32 )
__UpperCAmelCase : Any = image_processor(images=prepare_img(), return_tensors="pt" )
__UpperCAmelCase : Dict = model(**snake_case__ )
__UpperCAmelCase : Tuple = outputs.logits
if mobilevit_name.startswith("deeplabv3_" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
__UpperCAmelCase : int = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
__UpperCAmelCase : Tuple = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
__UpperCAmelCase : Any = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
] )
else:
raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3, :3, :3], snake_case__, atol=1e-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
__UpperCAmelCase : str = torch.tensor([-0.9866, 0.2392, -1.1241] )
elif mobilevit_name == "mobilevit_xs":
__UpperCAmelCase : Tuple = torch.tensor([-2.4761, -0.9399, -1.9587] )
elif mobilevit_name == "mobilevit_xxs":
__UpperCAmelCase : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653] )
else:
raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3], snake_case__, atol=1e-4 )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(f'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case__ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(snake_case__ )
if push_to_hub:
__UpperCAmelCase : List[str] = {
"mobilevit_s": "mobilevit-small",
"mobilevit_xs": "mobilevit-x-small",
"mobilevit_xxs": "mobilevit-xx-small",
"deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
"deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
"deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
}
print("Pushing to the hub..." )
__UpperCAmelCase : int = model_mapping[mobilevit_name]
image_processor.push_to_hub(snake_case__, organization="apple" )
model.push_to_hub(snake_case__, organization="apple" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_snake_case = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 342 | 0 |
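# --- Editor's note: the MobileViT converter above splits a fused qkv
# projection into separate q / k / v tensors by slicing along dim 0.
# Self-contained illustration with torch (dim = 4 is arbitrary):
import torch

dim = 4
qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q = qkv_weight[:dim, :]
k = qkv_weight[dim : dim * 2, :]
v = qkv_weight[-dim:, :]
assert q.shape == k.shape == v.shape == (dim, dim)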
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Union[str, Any]:
__UpperCAmelCase : List[Any] = k_size // 2
__UpperCAmelCase : Any = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
__UpperCAmelCase : Optional[Any] = 1 / (2 * pi * sigma) * exp(-(square(__a ) + square(__a )) / (2 * square(__a )) )
return g
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Union[str, Any]:
__UpperCAmelCase : Union[str, Any] = image.shape[0], image.shape[1]
# dst image height and width
__UpperCAmelCase : int = height - k_size + 1
__UpperCAmelCase : Optional[int] = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
__UpperCAmelCase : Optional[Any] = zeros((dst_height * dst_width, k_size * k_size) )
__UpperCAmelCase : Tuple = 0
for i, j in product(range(__a ), range(__a ) ):
__UpperCAmelCase : Optional[int] = ravel(image[i : i + k_size, j : j + k_size] )
__UpperCAmelCase : str = window
row += 1
# turn the kernel into shape(k*k, 1)
__UpperCAmelCase : List[Any] = gen_gaussian_kernel(__a, __a )
__UpperCAmelCase : str = ravel(__a )
# reshape and get the dst image
__UpperCAmelCase : Optional[int] = dot(__a, __a ).reshape(__a, __a ).astype(__a )
return dst
if __name__ == "__main__":
# read original image
_snake_case = imread(r'''../image_data/lena.jpg''')
# turn image in gray scale value
_snake_case = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
_snake_case = gaussian_filter(gray, 3, sigma=1)
_snake_case = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('''gaussian filter with 3x3 mask''', gaussianaxa)
imshow('''gaussian filter with 5x5 mask''', gaussianaxa)
waitKey()
| 350 | import math
_snake_case = 10
_snake_case = 7
_snake_case = BALLS_PER_COLOUR * NUM_COLOURS
def _UpperCamelCase ( snake_case__ = 20 ) -> str:
    __UpperCAmelCase : Optional[Any] = math.comb(NUM_BALLS, snake_case__ )  # C(70, 20): all possible draws
__UpperCAmelCase : List[Any] = math.comb(NUM_BALLS - BALLS_PER_COLOUR, snake_case__ )
__UpperCAmelCase : Dict = NUM_COLOURS * (1 - missing_colour / total)
return f'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
| 342 | 0 |
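# --- Editor's note: the expected-colour result above follows from linearity
# of expectation: each of the NUM_COLOURS = 7 colours is absent from a draw
# of 20 balls with probability C(70 - 10, 20) / C(70, 20), hence
# E[#distinct colours] = 7 * (1 - C(60, 20) / C(70, 20)) ~ 6.818741802.
import math
print(7 * (1 - math.comb(60, 20) / math.comb(70, 20)))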
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
_snake_case = logging.getLogger()
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Tuple:
__UpperCAmelCase : int = """\n""".join(A__ )
Path(A__ ).open("w" ).writelines(A__ )
_snake_case = "patrickvonplaten/t5-tiny-random"
_snake_case = "sshleifer/bart-tiny-random"
_snake_case = "sshleifer/tiny-mbart"
_snake_case = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class _snake_case ( _SCREAMING_SNAKE_CASE ):
def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: str ) -> Any:
__UpperCAmelCase : Tuple = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
__UpperCAmelCase : str = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
__UpperCAmelCase : List[str] = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""]
_dump_articles(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : List[str] = str(Path(self.get_auto_remove_tmp_dir() ) / "scores.json" )
__UpperCAmelCase : Union[str, Any] = """translation_en_to_de""" if model == T5_TINY else """summarization"""
__UpperCAmelCase : Any = f'''\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '''.split()
with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ):
run_generate()
assert Path(__lowerCamelCase ).exists()
# os.remove(Path(output_file_name))
def _lowerCamelCase ( self: Optional[int] ) -> List[str]:
self.run_eval_tester(__lowerCamelCase )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def _lowerCamelCase ( self: str , __lowerCamelCase: str ) -> Optional[int]:
self.run_eval_tester(__lowerCamelCase )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Tuple ) -> Optional[Any]:
__UpperCAmelCase : Tuple = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
__UpperCAmelCase : List[Any] = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
__UpperCAmelCase : Union[str, Any] = {
"""en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""],
"""de""": [
"""Maschinelles Lernen ist großartig, oder?""",
"""Ich esse gerne Bananen""",
"""Morgen ist wieder ein toller Tag!""",
],
}
__UpperCAmelCase : Dict = Path(self.get_auto_remove_tmp_dir() )
__UpperCAmelCase : Any = str(tmp_dir / "scores.json" )
__UpperCAmelCase : List[Any] = str(tmp_dir / "val.target" )
_dump_articles(__lowerCamelCase , text["en"] )
_dump_articles(__lowerCamelCase , text["de"] )
__UpperCAmelCase : str = """translation_en_to_de""" if model == T5_TINY else """summarization"""
__UpperCAmelCase : int = f'''\n run_eval_search.py\n {model}\n {str(__lowerCamelCase )}\n {str(__lowerCamelCase )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '''.split()
testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"] )
with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ):
with CaptureStdout() as cs:
run_search()
__UpperCAmelCase : Optional[int] = [""" num_beams | length_penalty""", model, """Best score args"""]
__UpperCAmelCase : Tuple = ["""Info"""]
if "translation" in task:
expected_strings.append("bleu" )
else:
expected_strings.extend(__lowerCamelCase )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(__lowerCamelCase ).exists()
os.remove(Path(__lowerCamelCase ) )
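# --- Editor's note: the tests above drive CLI entry points by patching
# sys.argv before calling the script's main function. The pattern in
# isolation (self-contained, runnable):
import sys
from unittest.mock import patch

def read_first_arg() -> str:
    return sys.argv[1]

with patch.object(sys, "argv", ["prog", "hello"]):
    assert read_first_arg() == "hello"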
| 351 | def _UpperCamelCase ( snake_case__ ) -> int:
__UpperCAmelCase : int = [0] * len(snake_case__ )
__UpperCAmelCase : Union[str, Any] = []
__UpperCAmelCase : str = [1] * len(snake_case__ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(snake_case__ ) ):
if indegree[i] == 0:
queue.append(snake_case__ )
while queue:
__UpperCAmelCase : List[str] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
__UpperCAmelCase : str = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(snake_case__ )
print(max(snake_case__ ) )
# Adjacency list of Graph
_snake_case = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 342 | 0 |
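# --- Editor's note: the DAG snippet above relaxes long_dist[x] along edges
# in Kahn's topological order, so it reports the number of vertices on a
# longest path. Equivalent compact check on a diamond graph (a sketch, not
# the author's function):
from collections import deque

def longest_path_vertices(g):
    indeg = {v: 0 for v in g}
    for targets in g.values():
        for v in targets:
            indeg[v] += 1
    dist = {v: 1 for v in g}
    q = deque(v for v in g if indeg[v] == 0)
    while q:
        u = q.popleft()
        for v in g[u]:
            dist[v] = max(dist[v], dist[u] + 1)
            indeg[v] -= 1
            if indeg[v] == 0:
                q.append(v)
    return max(dist.values())

assert longest_path_vertices({0: [1, 2], 1: [3], 2: [3], 3: []}) == 3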
import numpy as np
_snake_case = [
['''a''', '''b''', '''c''', '''d''', '''e'''],
['''f''', '''g''', '''h''', '''i''', '''k'''],
['''l''', '''m''', '''n''', '''o''', '''p'''],
['''q''', '''r''', '''s''', '''t''', '''u'''],
['''v''', '''w''', '''x''', '''y''', '''z'''],
]
class _snake_case :
def __init__( self: Dict ) -> List[str]:
__UpperCAmelCase : Tuple = np.array(snake_case_ )
def _lowerCamelCase ( self: Dict , __lowerCamelCase: str ) -> Optional[Any]:
__UpperCAmelCase : Any = np.where(letter == self.SQUARE )
__UpperCAmelCase : Optional[int] = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: int , __lowerCamelCase: int ) -> Tuple:
__UpperCAmelCase : int = self.SQUARE[indexa - 1, indexa - 1]
return letter
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: str ) -> Optional[Any]:
__UpperCAmelCase : Tuple = message.lower()
__UpperCAmelCase : Tuple = message.replace(" " , "" )
__UpperCAmelCase : Dict = message.replace("j" , "i" )
__UpperCAmelCase : str = np.empty((2, len(snake_case_ )) )
for letter_index in range(len(snake_case_ ) ):
__UpperCAmelCase : str = self.letter_to_numbers(message[letter_index] )
__UpperCAmelCase : Dict = numbers[0]
__UpperCAmelCase : Union[str, Any] = numbers[1]
__UpperCAmelCase : Dict = first_step.reshape(2 * len(snake_case_ ) )
__UpperCAmelCase : List[str] = ''''''
for numbers_index in range(len(snake_case_ ) ):
__UpperCAmelCase : Dict = int(second_step[numbers_index * 2] )
__UpperCAmelCase : Any = int(second_step[(numbers_index * 2) + 1] )
__UpperCAmelCase : Optional[int] = self.numbers_to_letter(snake_case_ , snake_case_ )
__UpperCAmelCase : Any = encoded_message + letter
return encoded_message
def _lowerCamelCase ( self: Dict , __lowerCamelCase: str ) -> Union[str, Any]:
__UpperCAmelCase : Optional[Any] = message.lower()
        __UpperCAmelCase : Tuple = message.replace(" " , "" )  # drop spaces, mirroring encode
__UpperCAmelCase : Union[str, Any] = np.empty(2 * len(snake_case_ ) )
for letter_index in range(len(snake_case_ ) ):
__UpperCAmelCase : Union[str, Any] = self.letter_to_numbers(message[letter_index] )
__UpperCAmelCase : int = numbers[0]
__UpperCAmelCase : Union[str, Any] = numbers[1]
__UpperCAmelCase : Dict = first_step.reshape((2, len(snake_case_ )) )
__UpperCAmelCase : Optional[Any] = ''''''
for numbers_index in range(len(snake_case_ ) ):
__UpperCAmelCase : Union[str, Any] = int(second_step[0, numbers_index] )
__UpperCAmelCase : str = int(second_step[1, numbers_index] )
__UpperCAmelCase : Dict = self.numbers_to_letter(snake_case_ , snake_case_ )
__UpperCAmelCase : Optional[int] = decoded_message + letter
return decoded_message
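# --- Editor's note: the cipher above encodes each letter as its 1-based
# (row, column) position in a fixed 5x5 square, folding 'j' into 'i'.
# Standalone check of the coordinate lookup:
import numpy as np

SQUARE_DEMO = np.array(
    [
        ["a", "b", "c", "d", "e"],
        ["f", "g", "h", "i", "k"],
        ["l", "m", "n", "o", "p"],
        ["q", "r", "s", "t", "u"],
        ["v", "w", "x", "y", "z"],
    ]
)
rows, cols = np.where(SQUARE_DEMO == "n")
assert (rows[0] + 1, cols[0] + 1) == (3, 3)  # 'n' -> row 3, column 3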
| 352 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 342 | 0 |
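# --- Editor's note: the Whisper package __init__ above defers heavy imports
# through transformers' _LazyModule. The same effect can be sketched with a
# PEP 562 module-level __getattr__ (standalone illustration, not the
# library's mechanism; only meaningful at the top of a module):
import importlib

_LAZY_SOURCES = {"sqrt": "math"}  # attribute name -> providing module

def __getattr__(name):
    if name in _LAZY_SOURCES:
        return getattr(importlib.import_module(_LAZY_SOURCES[name]), name)
    raise AttributeError(name)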
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class _snake_case ( _lowercase ):
lowerCamelCase__: Optional[Any] = "philschmid/bart-large-cnn-samsum"
lowerCamelCase__: Optional[int] = (
"This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
"and returns a summary of the text."
)
lowerCamelCase__: Tuple = "summarizer"
lowerCamelCase__: int = AutoTokenizer
lowerCamelCase__: Dict = AutoModelForSeqaSeqLM
lowerCamelCase__: Tuple = ["text"]
lowerCamelCase__: int = ["text"]
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: Tuple ) -> Dict:
return self.pre_processor(__lowerCamelCase , return_tensors="pt" , truncation=__lowerCamelCase )
def _lowerCamelCase ( self: str , __lowerCamelCase: Optional[Any] ) -> Optional[int]:
return self.model.generate(**__lowerCamelCase )[0]
def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: Union[str, Any] ) -> Tuple:
return self.pre_processor.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase )
| 353 | from __future__ import annotations
from math import pi
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 342 | 0 |
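# --- Editor's note: the solver above rearranges X_L = 2*pi*f*L for whichever
# quantity is passed as 0. Worked check for f = 1 kHz, L = 35 mH:
from math import pi

assert abs(2 * pi * 1e3 * 35e-3 - 70 * pi) < 1e-9  # X_L = 70*pi ~ 219.91 ohm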
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class _snake_case ( unittest.TestCase ):
def __init__( self: str , __lowerCamelCase: Any , __lowerCamelCase: List[Any]=13 , __lowerCamelCase: Dict=7 , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Any=True , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: Dict=99 , __lowerCamelCase: Optional[Any]=32 , __lowerCamelCase: Union[str, Any]=5 , __lowerCamelCase: int=4 , __lowerCamelCase: List[Any]=37 , __lowerCamelCase: List[Any]="gelu" , __lowerCamelCase: Dict=0.1 , __lowerCamelCase: List[str]=0.1 , __lowerCamelCase: List[str]=5_12 , __lowerCamelCase: str=16 , __lowerCamelCase: str=2 , __lowerCamelCase: List[str]=0.02 , __lowerCamelCase: List[str]=4 , ) -> Optional[Any]:
__UpperCAmelCase : Dict = parent
__UpperCAmelCase : str = batch_size
__UpperCAmelCase : Dict = seq_length
__UpperCAmelCase : Dict = is_training
__UpperCAmelCase : Optional[Any] = use_attention_mask
__UpperCAmelCase : Optional[int] = use_token_type_ids
__UpperCAmelCase : Any = use_labels
__UpperCAmelCase : Dict = vocab_size
__UpperCAmelCase : List[Any] = hidden_size
__UpperCAmelCase : int = num_hidden_layers
__UpperCAmelCase : List[Any] = num_attention_heads
__UpperCAmelCase : Tuple = intermediate_size
__UpperCAmelCase : Dict = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
__UpperCAmelCase : Any = max_position_embeddings
__UpperCAmelCase : str = type_vocab_size
__UpperCAmelCase : Any = type_sequence_label_size
__UpperCAmelCase : Any = initializer_range
__UpperCAmelCase : List[Any] = num_choices
def _lowerCamelCase ( self: Optional[Any] ) -> str:
__UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : Optional[Any] = None
if self.use_attention_mask:
__UpperCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : str = None
if self.use_token_type_ids:
__UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : str = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _lowerCamelCase ( self: str ) -> Optional[int]:
__UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = config_and_inputs
__UpperCAmelCase : Optional[int] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def _lowerCamelCase ( self: str ) -> Optional[Any]:
__UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = config_and_inputs
__UpperCAmelCase : str = True
__UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class _snake_case ( lowercase_ , unittest.TestCase ):
lowerCamelCase__: List[Any] = True
lowerCamelCase__: List[Any] = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCamelCase ( self: Union[str, Any] ) -> str:
__UpperCAmelCase : Optional[int] = FlaxRobertaModelTester(self )
@slow
def _lowerCamelCase ( self: int ) -> Dict:
for model_class_name in self.all_model_classes:
__UpperCAmelCase : Optional[Any] = model_class_name.from_pretrained("roberta-base" , from_pt=a__ )
__UpperCAmelCase : List[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(a__ )
| 354 | import flax.linen as nn
import jax
import jax.numpy as jnp
class _snake_case ( nn.Module ):
lowerCamelCase__: int
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: Tuple ) -> Union[str, Any]:
__UpperCAmelCase : List[str] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Optional[Any] , __lowerCamelCase: Optional[int] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = hidden_states.shape
__UpperCAmelCase : Dict = jax.image.resize(
__lowerCamelCase , shape=(batch, height * 2, width * 2, channels) , method="nearest" , )
__UpperCAmelCase : Dict = self.conv(__lowerCamelCase )
return hidden_states
class _snake_case ( nn.Module ):
lowerCamelCase__: int
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: str ) -> Any:
__UpperCAmelCase : Optional[int] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Dict , __lowerCamelCase: str ) -> List[Any]:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
__UpperCAmelCase : Any = self.conv(__lowerCamelCase )
return hidden_states
class _snake_case ( nn.Module ):
lowerCamelCase__: int
lowerCamelCase__: int = None
lowerCamelCase__: float = 0.0
lowerCamelCase__: bool = None
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: str ) -> List[str]:
__UpperCAmelCase : str = self.in_channels if self.out_channels is None else self.out_channels
__UpperCAmelCase : Dict = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
__UpperCAmelCase : List[str] = nn.Conv(
__lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__UpperCAmelCase : Optional[Any] = nn.Dense(__lowerCamelCase , dtype=self.dtype )
__UpperCAmelCase : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
__UpperCAmelCase : Optional[Any] = nn.Dropout(self.dropout_prob )
__UpperCAmelCase : Tuple = nn.Conv(
__lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__UpperCAmelCase : Optional[int] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
__UpperCAmelCase : List[Any] = None
if use_nin_shortcut:
__UpperCAmelCase : Dict = nn.Conv(
__lowerCamelCase , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , )
def __call__( self: Tuple , __lowerCamelCase: Tuple , __lowerCamelCase: str , __lowerCamelCase: Union[str, Any]=True ) -> List[Any]:
__UpperCAmelCase : Dict = hidden_states
__UpperCAmelCase : int = self.norma(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = nn.swish(__lowerCamelCase )
__UpperCAmelCase : Tuple = self.conva(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.time_emb_proj(nn.swish(__lowerCamelCase ) )
__UpperCAmelCase : List[str] = jnp.expand_dims(jnp.expand_dims(__lowerCamelCase , 1 ) , 1 )
__UpperCAmelCase : List[str] = hidden_states + temb
__UpperCAmelCase : Union[str, Any] = self.norma(__lowerCamelCase )
__UpperCAmelCase : Tuple = nn.swish(__lowerCamelCase )
__UpperCAmelCase : str = self.dropout(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : List[str] = self.conva(__lowerCamelCase )
if self.conv_shortcut is not None:
__UpperCAmelCase : Optional[int] = self.conv_shortcut(__lowerCamelCase )
return hidden_states + residual
| 342 | 0 |
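# --- Editor's note: the Flax upsample block above doubles spatial size with
# nearest-neighbour resize (NHWC layout) before its 3x3 convolution. The
# resize step in isolation (runnable wherever jax is installed):
import jax
import jax.numpy as jnp

x = jnp.arange(4.0).reshape(1, 2, 2, 1)  # (batch, height, width, channels)
y = jax.image.resize(x, shape=(1, 4, 4, 1), method="nearest")
assert y.shape == (1, 4, 4, 1)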
_snake_case = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
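# --- Editor's note: every guard in the __init__ above follows one pattern:
# probe a dependency, raise OptionalDependencyNotAvailable, and fall back to
# dummy objects. Generic standalone sketch of the same idea:
import importlib

def optional_import(module_name):
    try:
        return importlib.import_module(module_name)
    except ImportError:
        return None  # caller substitutes a dummy or disables the feature

assert optional_import("json") is not None
assert optional_import("definitely_not_installed_pkg") is None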
| 355 | import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_snake_case = pytest.mark.integration
@require_faiss
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: Union[str, Any] ) -> str:
__UpperCAmelCase : Optional[int] = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(__lowerCamelCase ) for x in np.arange(30 ).tolist()]} )
return dset
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
__UpperCAmelCase : int = dset.map(
lambda __lowerCamelCase , __lowerCamelCase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__lowerCamelCase , keep_in_memory=__lowerCamelCase )
__UpperCAmelCase : Tuple = dset.add_faiss_index("vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT )
__UpperCAmelCase , __UpperCAmelCase : Dict = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def _lowerCamelCase ( self: List[str] ) -> int:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__UpperCAmelCase , __UpperCAmelCase : Tuple = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
__UpperCAmelCase , __UpperCAmelCase : List[Any] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(__lowerCamelCase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def _lowerCamelCase ( self: List[str] ) -> Dict:
from elasticsearch import Elasticsearch
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
__UpperCAmelCase : int = {"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 30 )
__UpperCAmelCase : Dict = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
__UpperCAmelCase : Any = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=__lowerCamelCase )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: List[str] ) -> Optional[int]:
import faiss
__UpperCAmelCase : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
__UpperCAmelCase : Dict = np.zeros(5 , dtype=np.floataa )
__UpperCAmelCase : List[str] = 1
__UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(__lowerCamelCase )
self.assertRaises(__lowerCamelCase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
__UpperCAmelCase : List[str] = np.eye(5 , dtype=np.floataa )[::-1]
__UpperCAmelCase , __UpperCAmelCase : Any = index.search_batch(__lowerCamelCase )
self.assertRaises(__lowerCamelCase , index.search_batch , queries[0] )
__UpperCAmelCase : Dict = [scores[0] for scores in total_scores]
__UpperCAmelCase : int = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , __lowerCamelCase )
def _lowerCamelCase ( self: Any ) -> List[str]:
import faiss
__UpperCAmelCase : Dict = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
__UpperCAmelCase : Optional[Any] = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__lowerCamelCase ):
__UpperCAmelCase : Any = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def _lowerCamelCase ( self: List[str] ) -> Dict:
import faiss
__UpperCAmelCase : str = faiss.IndexFlat(5 )
__UpperCAmelCase : int = FaissIndex(custom_index=__lowerCamelCase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def _lowerCamelCase ( self: Union[str, Any] ) -> int:
import faiss
__UpperCAmelCase : Any = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file:
index.save(tmp_file.name )
__UpperCAmelCase : List[str] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
__UpperCAmelCase : Tuple = np.zeros(5 , dtype=np.floataa )
__UpperCAmelCase : Tuple = 1
__UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search(__lowerCamelCase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def _UpperCamelCase ( snake_case__ ) -> Optional[Any]:
import faiss
__UpperCAmelCase : Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5, dtype=np.floataa ) )
__UpperCAmelCase : Optional[Any] = "index.faiss"
__UpperCAmelCase : Optional[int] = f'''mock://{index_name}'''
index.save(snake_case__, storage_options=mockfs.storage_options )
__UpperCAmelCase : Dict = FaissIndex.load(snake_case__, storage_options=mockfs.storage_options )
__UpperCAmelCase : str = np.zeros(5, dtype=np.floataa )
__UpperCAmelCase : Any = 1
__UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(snake_case__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
__UpperCAmelCase : Optional[Any] = Elasticsearch()
__UpperCAmelCase : Dict = {"acknowledged": True}
__UpperCAmelCase : Any = ElasticSearchIndex(es_client=__lowerCamelCase )
            mocked_bulk.return_value = [(True, None)] * 3
index.add_documents(["foo", "bar", "foobar"] )
# single query
__UpperCAmelCase : Dict = "foo"
__UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = index.search(__lowerCamelCase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
__UpperCAmelCase : int = "foo"
__UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search(__lowerCamelCase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
__UpperCAmelCase : int = ["foo", "bar", "foobar"]
__UpperCAmelCase : Union[str, Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
__UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search_batch(__lowerCamelCase )
__UpperCAmelCase : Tuple = [scores[0] for scores in total_scores]
__UpperCAmelCase : Optional[int] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __lowerCamelCase )
# batched queries with timeout
__UpperCAmelCase : str = ["foo", "bar", "foobar"]
__UpperCAmelCase : Tuple = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search_batch(__lowerCamelCase , request_timeout=30 )
__UpperCAmelCase : Union[str, Any] = [scores[0] for scores in total_scores]
__UpperCAmelCase : List[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __lowerCamelCase )
| 342 | 0 |
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class _snake_case :
def __init__( self: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: Tuple=99 , __lowerCamelCase: str=13 , __lowerCamelCase: Optional[int]=16 , __lowerCamelCase: int=7 , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: Tuple=True , __lowerCamelCase: Tuple=True , __lowerCamelCase: Optional[int]=False , __lowerCamelCase: List[str]=True , __lowerCamelCase: Tuple=2 , __lowerCamelCase: Optional[int]=32 , __lowerCamelCase: Tuple=4 , __lowerCamelCase: Any=4 , __lowerCamelCase: Any=30 , __lowerCamelCase: str=0 , __lowerCamelCase: List[str]=1 , __lowerCamelCase: Dict=2 , __lowerCamelCase: Optional[Any]=None , ) -> Dict:
__UpperCAmelCase : Optional[Any] = parent
__UpperCAmelCase : Dict = batch_size
__UpperCAmelCase : int = decoder_seq_length
# For common tests
__UpperCAmelCase : str = self.decoder_seq_length
__UpperCAmelCase : List[str] = is_training
__UpperCAmelCase : Optional[Any] = use_attention_mask
__UpperCAmelCase : int = use_labels
__UpperCAmelCase : Dict = vocab_size
__UpperCAmelCase : Union[str, Any] = d_model
__UpperCAmelCase : int = d_model
__UpperCAmelCase : Optional[Any] = decoder_layers
__UpperCAmelCase : Optional[int] = decoder_layers
__UpperCAmelCase : str = decoder_ffn_dim
__UpperCAmelCase : str = decoder_attention_heads
__UpperCAmelCase : List[str] = decoder_attention_heads
__UpperCAmelCase : int = eos_token_id
__UpperCAmelCase : int = bos_token_id
__UpperCAmelCase : Optional[Any] = pad_token_id
__UpperCAmelCase : Tuple = decoder_start_token_id
__UpperCAmelCase : Any = use_cache
__UpperCAmelCase : List[str] = max_position_embeddings
__UpperCAmelCase : Any = None
__UpperCAmelCase : Union[str, Any] = decoder_seq_length
__UpperCAmelCase : Union[str, Any] = 2
__UpperCAmelCase : List[str] = 1
def _lowerCamelCase ( self: Optional[Any] ) -> Union[str, Any]:
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__UpperCAmelCase : Tuple = None
if self.use_attention_mask:
__UpperCAmelCase : Any = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
__UpperCAmelCase : Optional[Any] = None
if self.use_labels:
__UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__UpperCAmelCase : Tuple = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Tuple , __lowerCamelCase: str , __lowerCamelCase: Optional[int] , ) -> List[Any]:
__UpperCAmelCase : List[Any] = True
__UpperCAmelCase : Optional[int] = TrOCRDecoder(config=__lowerCamelCase ).to(__lowerCamelCase ).eval()
__UpperCAmelCase : Tuple = input_ids[:2]
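        # replace any zero token ids (commonly the pad id) in the toy inputs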
input_ids[input_ids == 0] += 1
# first forward pass
__UpperCAmelCase : Dict = model(__lowerCamelCase , use_cache=__lowerCamelCase )
__UpperCAmelCase : List[str] = model(__lowerCamelCase )
__UpperCAmelCase : List[Any] = model(__lowerCamelCase , use_cache=__lowerCamelCase )
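        # with use_cache=True the output tuple gains one extra element (the past key/values), hence the +1 check below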
self.parent.assertTrue(len(__lowerCamelCase ) == len(__lowerCamelCase ) )
self.parent.assertTrue(len(__lowerCamelCase ) == len(__lowerCamelCase ) + 1 )
__UpperCAmelCase : str = outputs['''past_key_values''']
# create hypothetical next token and extent to next_input_ids
__UpperCAmelCase : Optional[int] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
__UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 )
__UpperCAmelCase : str = model(__lowerCamelCase )['''last_hidden_state''']
__UpperCAmelCase : str = model(__lowerCamelCase , past_key_values=__lowerCamelCase )['''last_hidden_state''']
# select random slice
__UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__UpperCAmelCase : Tuple = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
__UpperCAmelCase : Optional[int] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 )
def _lowerCamelCase ( self: Optional[int] ) -> str:
__UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
__UpperCAmelCase : str = config_and_inputs
__UpperCAmelCase : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class _snake_case ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowerCamelCase__: Union[str, Any] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCamelCase__: List[Any] = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCamelCase__: Union[str, Any] = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
lowerCamelCase__: List[str] = True
lowerCamelCase__: Any = False
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : Optional[int] = TrOCRStandaloneDecoderModelTester(self , is_training=__lowerCamelCase )
__UpperCAmelCase : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase )
def _lowerCamelCase ( self: List[str] ) -> List[str]:
pass
def _lowerCamelCase ( self: Any ) -> List[str]:
pass
def _lowerCamelCase ( self: Dict ) -> str:
pass
def _lowerCamelCase ( self: Tuple ) -> List[str]:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*__lowerCamelCase )
def _lowerCamelCase ( self: List[str] ) -> Any:
return
@unittest.skip("The model doesn\'t support left padding" ) # and it's not used enough to be worth fixing :)
def _lowerCamelCase ( self: Optional[int] ) -> int:
pass
| 356 | import argparse
import struct
import unittest
class _snake_case :
def __init__( self: Tuple , __lowerCamelCase: bytes ) -> None:
__UpperCAmelCase : Tuple = data
# Initialize hash values
__UpperCAmelCase : Any = [
0x6_A_0_9_E_6_6_7,
0xB_B_6_7_A_E_8_5,
0x3_C_6_E_F_3_7_2,
0xA_5_4_F_F_5_3_A,
0x5_1_0_E_5_2_7_F,
0x9_B_0_5_6_8_8_C,
0x1_F_8_3_D_9_A_B,
0x5_B_E_0_C_D_1_9,
]
# Initialize round constants
__UpperCAmelCase : Dict = [
0x4_2_8_A_2_F_9_8,
0x7_1_3_7_4_4_9_1,
0xB_5_C_0_F_B_C_F,
0xE_9_B_5_D_B_A_5,
0x3_9_5_6_C_2_5_B,
0x5_9_F_1_1_1_F_1,
0x9_2_3_F_8_2_A_4,
0xA_B_1_C_5_E_D_5,
0xD_8_0_7_A_A_9_8,
0x1_2_8_3_5_B_0_1,
0x2_4_3_1_8_5_B_E,
0x5_5_0_C_7_D_C_3,
0x7_2_B_E_5_D_7_4,
0x8_0_D_E_B_1_F_E,
0x9_B_D_C_0_6_A_7,
0xC_1_9_B_F_1_7_4,
0xE_4_9_B_6_9_C_1,
0xE_F_B_E_4_7_8_6,
0x0_F_C_1_9_D_C_6,
0x2_4_0_C_A_1_C_C,
0x2_D_E_9_2_C_6_F,
0x4_A_7_4_8_4_A_A,
0x5_C_B_0_A_9_D_C,
0x7_6_F_9_8_8_D_A,
0x9_8_3_E_5_1_5_2,
0xA_8_3_1_C_6_6_D,
0xB_0_0_3_2_7_C_8,
0xB_F_5_9_7_F_C_7,
0xC_6_E_0_0_B_F_3,
0xD_5_A_7_9_1_4_7,
0x0_6_C_A_6_3_5_1,
0x1_4_2_9_2_9_6_7,
0x2_7_B_7_0_A_8_5,
0x2_E_1_B_2_1_3_8,
0x4_D_2_C_6_D_F_C,
0x5_3_3_8_0_D_1_3,
0x6_5_0_A_7_3_5_4,
0x7_6_6_A_0_A_B_B,
0x8_1_C_2_C_9_2_E,
0x9_2_7_2_2_C_8_5,
0xA_2_B_F_E_8_A_1,
0xA_8_1_A_6_6_4_B,
0xC_2_4_B_8_B_7_0,
0xC_7_6_C_5_1_A_3,
0xD_1_9_2_E_8_1_9,
0xD_6_9_9_0_6_2_4,
0xF_4_0_E_3_5_8_5,
0x1_0_6_A_A_0_7_0,
0x1_9_A_4_C_1_1_6,
0x1_E_3_7_6_C_0_8,
0x2_7_4_8_7_7_4_C,
0x3_4_B_0_B_C_B_5,
0x3_9_1_C_0_C_B_3,
0x4_E_D_8_A_A_4_A,
0x5_B_9_C_C_A_4_F,
0x6_8_2_E_6_F_F_3,
0x7_4_8_F_8_2_E_E,
0x7_8_A_5_6_3_6_F,
0x8_4_C_8_7_8_1_4,
0x8_C_C_7_0_2_0_8,
0x9_0_B_E_F_F_F_A,
0xA_4_5_0_6_C_E_B,
0xB_E_F_9_A_3_F_7,
0xC_6_7_1_7_8_F_2,
]
__UpperCAmelCase : List[Any] = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _lowerCamelCase ( __lowerCamelCase: bytes ) -> bytes:
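        # Pad with a single 0x80 byte and zeros so that, together with the 8-byte big-endian bit-length field, the message fills complete 64-byte blocks.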
__UpperCAmelCase : List[str] = B"\x80" + (B"\x00" * (63 - (len(__lowerCamelCase ) + 8) % 64))
__UpperCAmelCase : int = struct.pack(">Q" , (len(__lowerCamelCase ) * 8) )
return data + padding + big_endian_integer
def _lowerCamelCase ( self: Dict ) -> None:
# Convert into blocks of 64 bytes
__UpperCAmelCase : Dict = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
            # Convert the given block into a list of 16 four-byte big-endian integers
__UpperCAmelCase : List[str] = list(struct.unpack(">16L" , __lowerCamelCase ) )
            # extend with 48 zeroed words so the message schedule holds 64 entries
words += [0] * 48
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = self.hashes
for index in range(0 , 64 ):
if index > 15:
                # expand the message schedule: derive words 16..63 from earlier words
__UpperCAmelCase : Union[str, Any] = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
__UpperCAmelCase : str = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
__UpperCAmelCase : Union[str, Any] = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_0_0_0_0_0_0_0_0
# Compression
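            # one round of SHA-256 compression (S1, ch, temp1, then S0, maj, temp2 in FIPS 180-4 notation)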
__UpperCAmelCase : Union[str, Any] = self.ror(__lowerCamelCase , 6 ) ^ self.ror(__lowerCamelCase , 11 ) ^ self.ror(__lowerCamelCase , 25 )
__UpperCAmelCase : Tuple = (e & f) ^ ((~e & 0xF_F_F_F_F_F_F_F) & g)
__UpperCAmelCase : int = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_0_0_0_0_0_0_0_0
__UpperCAmelCase : List[Any] = self.ror(__lowerCamelCase , 2 ) ^ self.ror(__lowerCamelCase , 13 ) ^ self.ror(__lowerCamelCase , 22 )
__UpperCAmelCase : Dict = (a & b) ^ (a & c) ^ (b & c)
__UpperCAmelCase : int = (sa + maj) % 0x1_0_0_0_0_0_0_0_0
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = (
g,
f,
e,
((d + tempa) % 0x1_0_0_0_0_0_0_0_0),
c,
b,
a,
((tempa + tempa) % 0x1_0_0_0_0_0_0_0_0),
)
__UpperCAmelCase : Optional[int] = [a, b, c, d, e, f, g, h]
# Modify final values
__UpperCAmelCase : List[str] = [
((element + mutated_hash_values[index]) % 0x1_0_0_0_0_0_0_0_0)
for index, element in enumerate(self.hashes )
]
__UpperCAmelCase : int = "".join([hex(__lowerCamelCase )[2:].zfill(8 ) for value in self.hashes] )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: int ) -> int:
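        # right-rotate a 32-bit value: bits shifted off the right re-enter on the left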
return 0xF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations)
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: List[Any] ) -> None:
import hashlib
__UpperCAmelCase : Dict = bytes("Test String" , "utf-8" )
self.assertEqual(SHAaaa(__lowerCamelCase ).hash , hashlib.shaaaa(__lowerCamelCase ).hexdigest() )
def _UpperCamelCase ( ) -> None:
import doctest
doctest.testmod()
__UpperCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
parser.add_argument(
"-f", "--file", dest="input_file", help="Hash contents of a file" )
__UpperCAmelCase : List[Any] = parser.parse_args()
__UpperCAmelCase : Optional[int] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file, "rb" ) as f:
__UpperCAmelCase : List[str] = f.read()
else:
__UpperCAmelCase : List[Any] = bytes(snake_case__, "utf-8" )
print(SHAaaa(snake_case__ ).hash )
if __name__ == "__main__":
    _UpperCamelCase()
| 342 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_snake_case = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
_snake_case = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
_snake_case = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
_snake_case = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 357 | import numpy as np
import datasets
_snake_case = '''
Compute the Mahalanobis Distance
Mahalanobis distance is the distance between a point and a distribution,
not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
_snake_case = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
_snake_case = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalanobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def _lowerCamelCase ( self: List[str] ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: Union[str, Any] ) -> List[str]:
# convert to numpy arrays
__UpperCAmelCase : int = np.array(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = np.array(__lowerCamelCase )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("Expected `X` to be a 2D vector" )
if len(reference_distribution.shape ) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
# Get mahalanobis distance for each prediction
__UpperCAmelCase : str = X - np.mean(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = np.cov(reference_distribution.T )
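        # invert the covariance matrix, falling back to the pseudo-inverse when it is singular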
try:
__UpperCAmelCase : int = np.linalg.inv(__lowerCamelCase )
except np.linalg.LinAlgError:
__UpperCAmelCase : Optional[int] = np.linalg.pinv(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = np.dot(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Optional[int] = np.dot(__lowerCamelCase , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
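# A minimal standalone sketch (not part of the metric class above) of the same computation
# in plain numpy: squared Mahalanobis distance D^2(x) = (x - mu) @ pinv(Sigma) @ (x - mu)^T,
# with mu and Sigma estimated from the reference distribution. The helper name is hypothetical.
def _mahalanobis_sketch(X, reference_distribution):
    X = np.atleast_2d(np.asarray(X, dtype=float))
    reference = np.atleast_2d(np.asarray(reference_distribution, dtype=float))
    X_minus_mu = X - reference.mean(axis=0)
    # the pseudo-inverse tolerates a singular covariance matrix, mirroring the fallback above
    inv_cov = np.linalg.pinv(np.cov(reference.T))
    # one squared distance per row of X
    return np.einsum("ij,jk,ik->i", X_minus_mu, inv_cov, X_minus_mu)
# e.g. _mahalanobis_sketch([[0, 1]], [[0, 1], [1, 0]]) -> array([0.5])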
| 342 | 0 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_snake_case = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class _snake_case ( datasets.BuilderConfig ):
lowerCamelCase__: List[str] = None
def _UpperCamelCase ( snake_case__, snake_case__, ) -> str:
import pyspark
def generate_fn():
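        # yield (example_id, example) pairs one partition at a time, following the given partition order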
__UpperCAmelCase : int = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id" ) )
for partition_id in partition_order:
__UpperCAmelCase : List[Any] = df_with_partition_id.select("*" ).where(f'''part_id = {partition_id}''' ).drop("part_id" )
__UpperCAmelCase : Optional[int] = partition_df.collect()
__UpperCAmelCase : Any = 0
for row in rows:
yield f'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class _snake_case ( _BaseExamplesIterable ):
def __init__( self: List[Any] , __lowerCamelCase: "pyspark.sql.DataFrame" , __lowerCamelCase: int=None , ) -> List[str]:
__UpperCAmelCase : List[str] = df
__UpperCAmelCase : Optional[int] = partition_order or range(self.df.rdd.getNumPartitions() )
__UpperCAmelCase : List[str] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self: str ) -> Tuple:
yield from self.generate_examples_fn()
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: np.random.Generator ) -> int:
__UpperCAmelCase : Optional[Any] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(__snake_case )
return SparkExamplesIterable(self.df , partition_order=__snake_case )
def _lowerCamelCase ( self: int , __lowerCamelCase: int , __lowerCamelCase: int ) -> int:
__UpperCAmelCase : str = self.split_shard_indices_by_worker(__snake_case , __snake_case )
return SparkExamplesIterable(self.df , partition_order=__snake_case )
@property
def _lowerCamelCase ( self: int ) -> Optional[int]:
return len(self.partition_order )
class _snake_case ( datasets.DatasetBuilder ):
lowerCamelCase__: Union[str, Any] = SparkConfig
def __init__( self: Tuple , __lowerCamelCase: "pyspark.sql.DataFrame" , __lowerCamelCase: str = None , __lowerCamelCase: str = None , **__lowerCamelCase: Tuple , ) -> Optional[int]:
import pyspark
__UpperCAmelCase : Tuple = pyspark.sql.SparkSession.builder.getOrCreate()
__UpperCAmelCase : Union[str, Any] = df
__UpperCAmelCase : Optional[int] = working_dir
super().__init__(
cache_dir=__snake_case , config_name=str(self.df.semanticHash() ) , **__snake_case , )
def _lowerCamelCase ( self: Dict ) -> str:
# Returns the path of the created file.
def create_cache_and_write_probe(__lowerCamelCase: Tuple ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=__snake_case )
__UpperCAmelCase : Dict = os.path.join(self._cache_dir , "fs_test" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(__snake_case , "a" )
return [probe_file]
if self._spark.conf.get("spark.master" , "" ).startswith("local" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
__UpperCAmelCase : List[Any] = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(__snake_case ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )
def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
return datasets.DatasetInfo(features=self.config.features )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: datasets.download.download_manager.DownloadManager ) -> Any:
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: List[str] ) -> Optional[Any]:
import pyspark
def get_arrow_batch_size(__lowerCamelCase: List[str] ):
for batch in it:
yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )
__UpperCAmelCase : List[Any] = self.df.count()
__UpperCAmelCase : Any = df_num_rows if df_num_rows <= 1_00 else 1_00
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
__UpperCAmelCase : str = (
self.df.limit(__snake_case )
.repartition(1 )
.mapInArrow(__snake_case , "batch_bytes: long" )
.agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
__UpperCAmelCase : Tuple = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
__UpperCAmelCase : Optional[int] = min(__snake_case , int(approx_total_size / max_shard_size ) )
__UpperCAmelCase : Dict = self.df.repartition(__snake_case )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: str , __lowerCamelCase: str , __lowerCamelCase: int , ) -> Dict:
import pyspark
__UpperCAmelCase : Optional[Any] = ParquetWriter if file_format == 'parquet' else ArrowWriter
__UpperCAmelCase : Any = os.path.join(self._working_dir , os.path.basename(__snake_case ) ) if self._working_dir else fpath
__UpperCAmelCase : Union[str, Any] = file_format == 'parquet'
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
__UpperCAmelCase : Optional[Any] = self.config.features
__UpperCAmelCase : Optional[Any] = self._writer_batch_size
__UpperCAmelCase : List[str] = self._fs.storage_options
def write_arrow(__lowerCamelCase: str ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
__UpperCAmelCase : Any = pyspark.TaskContext().taskAttemptId()
__UpperCAmelCase : int = next(__snake_case , __snake_case )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
__UpperCAmelCase : Tuple = 0
__UpperCAmelCase : Optional[Any] = writer_class(
features=__snake_case , path=working_fpath.replace("SSSSS" , f'''{shard_id:05d}''' ).replace("TTTTT" , f'''{task_id:05d}''' ) , writer_batch_size=__snake_case , storage_options=__snake_case , embed_local_files=__snake_case , )
__UpperCAmelCase : int = pa.Table.from_batches([first_batch] )
writer.write_table(__snake_case )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
__UpperCAmelCase : int = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
shard_id += 1
__UpperCAmelCase : Dict = writer_class(
features=writer._features , path=working_fpath.replace("SSSSS" , f'''{shard_id:05d}''' ).replace("TTTTT" , f'''{task_id:05d}''' ) , writer_batch_size=__snake_case , storage_options=__snake_case , embed_local_files=__snake_case , )
__UpperCAmelCase : Dict = pa.Table.from_batches([batch] )
writer.write_table(__snake_case )
if writer._num_bytes > 0:
__UpperCAmelCase : str = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(__snake_case ) ):
__UpperCAmelCase : List[str] = os.path.join(os.path.dirname(__snake_case ) , os.path.basename(__snake_case ) )
shutil.move(__snake_case , __snake_case )
__UpperCAmelCase : Dict = (
self.df.mapInArrow(__snake_case , "task_id: long, num_examples: long, num_bytes: long" )
.groupBy("task_id" )
.agg(
pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _lowerCamelCase ( self: Dict , __lowerCamelCase: "datasets.SplitGenerator" , __lowerCamelCase: str = "arrow" , __lowerCamelCase: Optional[Union[str, int]] = None , __lowerCamelCase: Optional[int] = None , **__lowerCamelCase: Tuple , ) -> str:
self._validate_cache_dir()
__UpperCAmelCase : Optional[int] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(__snake_case )
__UpperCAmelCase : Union[str, Any] = not is_remote_filesystem(self._fs )
__UpperCAmelCase : List[Any] = os.path.join if is_local else posixpath.join
__UpperCAmelCase : Any = '-TTTTT-SSSSS-of-NNNNN'
__UpperCAmelCase : List[str] = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
__UpperCAmelCase : int = path_join(self._output_dir , __snake_case )
__UpperCAmelCase : List[str] = 0
__UpperCAmelCase : int = 0
__UpperCAmelCase : Any = 0
__UpperCAmelCase : Tuple = []
__UpperCAmelCase : Any = []
for task_id, content in self._prepare_split_single(__snake_case , __snake_case , __snake_case ):
            (
                __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ,
            ) : Any = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(__snake_case )
__UpperCAmelCase : Union[str, Any] = total_num_examples
__UpperCAmelCase : Dict = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
__UpperCAmelCase : str = all_shard_lengths
        # Define fs outside of _rename_shard so that we don't reference self in the function, which would result in a
        # pickling error due to pickling the SparkContext.
__UpperCAmelCase : List[str] = self._fs
# use the -SSSSS-of-NNNNN pattern
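            # e.g. a per-task shard "...-00002-00000-of-NNNNN.arrow" becomes the global "...-00005-of-00012.arrow"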
def _rename_shard(
__lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: int , ):
rename(
__snake_case , fpath.replace("SSSSS" , f'''{shard_id:05d}''' ).replace("TTTTT" , f'''{task_id:05d}''' ) , fpath.replace("TTTTT-SSSSS" , f'''{global_shard_id:05d}''' ).replace("NNNNN" , f'''{total_shards:05d}''' ) , )
__UpperCAmelCase : Tuple = []
__UpperCAmelCase : str = 0
for i in range(len(__snake_case ) ):
__UpperCAmelCase : Optional[int] = task_id_and_num_shards[i]
for shard_id in range(__snake_case ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(__snake_case , len(__snake_case ) ).map(lambda __lowerCamelCase : _rename_shard(*__snake_case ) ).collect()
else:
# don't use any pattern
__UpperCAmelCase : Any = 0
__UpperCAmelCase : Any = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("SSSSS" , f'''{shard_id:05d}''' ).replace("TTTTT" , f'''{task_id:05d}''' ) , fpath.replace(__snake_case , "" ) , )
def _lowerCamelCase ( self: str , __lowerCamelCase: "datasets.SplitGenerator" , ) -> Dict:
return SparkExamplesIterable(self.df )
| 358 | import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class _snake_case ( unittest.TestCase ):
def __init__( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict=13 , __lowerCamelCase: List[str]=7 , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: int=True , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Tuple=99 , __lowerCamelCase: List[str]=32 , __lowerCamelCase: Optional[Any]=5 , __lowerCamelCase: List[str]=4 , __lowerCamelCase: str=37 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: int=0.1 , __lowerCamelCase: Optional[Any]=0.1 , __lowerCamelCase: Tuple=5_12 , __lowerCamelCase: int=16 , __lowerCamelCase: str=2 , __lowerCamelCase: Optional[Any]=0.02 , __lowerCamelCase: Optional[Any]=4 , ) -> str:
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : Optional[int] = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[str] = use_attention_mask
__UpperCAmelCase : Dict = use_token_type_ids
__UpperCAmelCase : Optional[int] = use_labels
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : Dict = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : Tuple = intermediate_size
__UpperCAmelCase : Union[str, Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : str = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = max_position_embeddings
__UpperCAmelCase : Optional[int] = type_vocab_size
__UpperCAmelCase : str = type_sequence_label_size
__UpperCAmelCase : Tuple = initializer_range
__UpperCAmelCase : str = num_choices
def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
__UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : str = None
if self.use_attention_mask:
__UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : Any = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__lowerCamelCase , )
return config, input_ids, attention_mask
def _lowerCamelCase ( self: str ) -> Any:
__UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = config_and_inputs
__UpperCAmelCase : Any = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: str = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCamelCase ( self: List[Any] ) -> Dict:
__UpperCAmelCase : Union[str, Any] = FlaxDistilBertModelTester(self )
@slow
def _lowerCamelCase ( self: Tuple ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
__UpperCAmelCase : Optional[int] = model_class_name.from_pretrained("distilbert-base-uncased" )
__UpperCAmelCase : Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(__lowerCamelCase )
@require_flax
class _snake_case ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self: int ) -> List[Any]:
__UpperCAmelCase : Dict = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased" )
__UpperCAmelCase : Any = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__UpperCAmelCase : Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__UpperCAmelCase : int = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
__UpperCAmelCase : str = (1, 11, 7_68)
self.assertEqual(output.shape , __lowerCamelCase )
__UpperCAmelCase : Optional[int] = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1e-4 ) )
| 342 | 0 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_snake_case = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
_snake_case = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
_snake_case = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def _lowerCamelCase ( self: List[str] ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def _lowerCamelCase ( self: int , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Any = 1 , __lowerCamelCase: Dict = 4 , ) -> Optional[int]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=A__ , hypotheses=A__ , min_len=A__ , max_len=A__ )
}
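# A minimal sentence-level sketch of the GLEU idea described above, assuming a single
# reference per hypothesis (the metric itself delegates to nltk's corpus-level
# gleu_score.corpus_gleu). GLEU = min(n-gram precision, n-gram recall) over orders
# min_len..max_len; the helper below is hypothetical and not part of the metric.
from collections import Counter
def _sentence_gleu_sketch(hypothesis, reference, min_len=1, max_len=4):
    def ngram_counts(tokens):
        return Counter(
            tuple(tokens[i : i + n])
            for n in range(min_len, max_len + 1)
            for i in range(len(tokens) - n + 1)
        )
    hyp, ref = ngram_counts(hypothesis), ngram_counts(reference)
    if not hyp or not ref:
        return 0.0
    matches = sum((hyp & ref).values())  # clipped (multiset) n-gram matches
    return min(matches / sum(hyp.values()), matches / sum(ref.values()))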
| 359 | import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
_snake_case = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
_snake_case = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
_snake_case = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
_snake_case = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
_snake_case = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Any:
for tf_name, hf_name in patterns:
__UpperCAmelCase : Optional[int] = k.replace(snake_case__, snake_case__ )
return k
def _UpperCamelCase ( snake_case__, snake_case__ ) -> BigBirdPegasusForConditionalGeneration:
__UpperCAmelCase : Dict = BigBirdPegasusConfig(**snake_case__ )
__UpperCAmelCase : Dict = BigBirdPegasusForConditionalGeneration(snake_case__ )
__UpperCAmelCase : Optional[Any] = torch_model.state_dict()
__UpperCAmelCase : Optional[int] = {}
# separating decoder weights
__UpperCAmelCase : List[Any] = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder" )}
__UpperCAmelCase : str = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder" )}
for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion" ):
__UpperCAmelCase : Optional[int] = [k.endswith(snake_case__ ) for ending in KEYS_TO_IGNORE]
if any(snake_case__ ):
continue
__UpperCAmelCase : List[str] = DECODER_PATTERNS
__UpperCAmelCase : str = rename_state_dict_key(snake_case__, snake_case__ )
if new_k not in state_dict:
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
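        # TF stores kernel matrices as (in_features, out_features); torch.nn.Linear expects (out, in), hence the transpose below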
if any(True if i in k else False for i in ["dense", "query", "key", "value"] ):
__UpperCAmelCase : Optional[int] = v.T
__UpperCAmelCase : str = torch.from_numpy(snake_case__ )
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion" ):
__UpperCAmelCase : int = [k.endswith(snake_case__ ) for ending in KEYS_TO_IGNORE]
if any(snake_case__ ):
continue
__UpperCAmelCase : Optional[Any] = REMAINING_PATTERNS
__UpperCAmelCase : Optional[int] = rename_state_dict_key(snake_case__, snake_case__ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
if any(True if i in k else False for i in ["dense", "query", "key", "value"] ):
__UpperCAmelCase : List[Any] = v.T
__UpperCAmelCase : List[str] = torch.from_numpy(snake_case__ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
__UpperCAmelCase : List[Any] = mapping["model.embed_positions.weight"]
__UpperCAmelCase : Optional[Any] = mapping.pop("model.embed_positions.weight" )
__UpperCAmelCase , __UpperCAmelCase : Any = torch_model.load_state_dict(snake_case__, strict=snake_case__ )
__UpperCAmelCase : str = [
k
for k in missing
if k
not in [
"final_logits_bias",
"model.encoder.embed_tokens.weight",
"model.decoder.embed_tokens.weight",
"lm_head.weight",
]
]
assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], f'''no matches found for the following tf keys {extra}'''
return torch_model
def _UpperCamelCase ( snake_case__ ) -> Dict:
__UpperCAmelCase : Tuple = tf.train.list_variables(snake_case__ )
__UpperCAmelCase : List[str] = {}
__UpperCAmelCase : str = ["global_step"]
for name, shape in tqdm(snake_case__, desc="converting tf checkpoint to dict" ):
__UpperCAmelCase : Tuple = any(pat in name for pat in ignore_name )
if skip_key:
continue
__UpperCAmelCase : Optional[Any] = tf.train.load_variable(snake_case__, snake_case__ )
__UpperCAmelCase : Tuple = array
return tf_weights
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Dict:
__UpperCAmelCase : str = get_tf_weights_as_numpy(snake_case__ )
__UpperCAmelCase : List[Any] = convert_bigbird_pegasus(snake_case__, snake_case__ )
torch_model.save_pretrained(snake_case__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
_snake_case = parser.parse_args()
_snake_case = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 342 | 0 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class _snake_case :
def __init__( self: List[str] , __lowerCamelCase: int , ) -> Tuple:
__UpperCAmelCase : Optional[int] = parent
__UpperCAmelCase : Any = 13
__UpperCAmelCase : List[Any] = 7
__UpperCAmelCase : Tuple = 30
__UpperCAmelCase : Union[str, Any] = self.seq_length + self.mem_len
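        # attention keys span the current segment plus the cached memory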
__UpperCAmelCase : List[Any] = 15
__UpperCAmelCase : Union[str, Any] = True
__UpperCAmelCase : str = True
__UpperCAmelCase : List[Any] = 99
__UpperCAmelCase : Any = [10, 50, 80]
__UpperCAmelCase : Union[str, Any] = 32
__UpperCAmelCase : List[str] = 32
__UpperCAmelCase : int = 4
__UpperCAmelCase : List[Any] = 8
__UpperCAmelCase : Union[str, Any] = 1_28
__UpperCAmelCase : str = 2
__UpperCAmelCase : str = 2
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : Dict = 1
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : Union[str, Any] = 3
__UpperCAmelCase : Optional[Any] = self.vocab_size - 1
__UpperCAmelCase : Union[str, Any] = 0.01
def _lowerCamelCase ( self: Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : List[str] = None
if self.use_labels:
__UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : str = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def _lowerCamelCase ( self: str ) -> List[Any]:
random.seed(self.seed )
tf.random.set_seed(self.seed )
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Tuple , __lowerCamelCase: Any , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: int ) -> Any:
__UpperCAmelCase : Optional[int] = TFTransfoXLModel(a__ )
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = model(a__ ).to_tuple()
__UpperCAmelCase : Union[str, Any] = {"input_ids": input_ids_a, "mems": mems_a}
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = model(a__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: int , __lowerCamelCase: List[str] , __lowerCamelCase: List[str] , __lowerCamelCase: Dict ) -> int:
__UpperCAmelCase : Optional[Any] = TFTransfoXLLMHeadModel(a__ )
__UpperCAmelCase , __UpperCAmelCase : List[Any] = model(a__ ).to_tuple()
__UpperCAmelCase : Optional[int] = {"input_ids": input_ids_a, "labels": lm_labels}
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = model(a__ ).to_tuple()
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = model([input_ids_a, mems_a] ).to_tuple()
__UpperCAmelCase : Union[str, Any] = {"input_ids": input_ids_a, "mems": mems_a, "labels": lm_labels}
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = model(a__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict , __lowerCamelCase: List[Any] ) -> Union[str, Any]:
__UpperCAmelCase : Optional[Any] = TFTransfoXLForSequenceClassification(a__ )
__UpperCAmelCase : Optional[Any] = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self: List[str] ) -> Optional[Any]:
__UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) : Optional[int] = config_and_inputs
__UpperCAmelCase : Optional[int] = {"input_ids": input_ids_a}
return config, inputs_dict
@require_tf
class _snake_case ( __a , __a , unittest.TestCase ):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False
    def is_pipeline_test_to_skip( self: int , pipeline_test_casse_name: int , config_class: List[str] , model_architecture: Any , tokenizer_name: Optional[Any] , processor_name: int ) -> int:
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True
        return False
    def setUp( self: int ) -> Optional[Any]:
        self.model_tester = TFTransfoXLModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TransfoXLConfig , d_embed=37 )
def _lowerCamelCase ( self: Optional[Any] ) -> Union[str, Any]:
self.config_tester.run_common_tests()
    def test_transfo_xl_model( self: int ) -> str:
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs )
    def test_transfo_xl_lm_head( self: Optional[int] ) -> Tuple:
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs )
    def test_transfo_xl_sequence_classification_model( self: Any ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs )
    def test_model_common_attributes( self: Optional[int] ) -> int:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x , tf.keras.layers.Layer )
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
def _lowerCamelCase ( self: Dict ) -> str:
# TODO JP: Make TransfoXL XLA compliant
pass
    @slow
    def test_model_from_pretrained( self: List[Any] ) -> str:
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss." )
def _lowerCamelCase ( self: Any ) -> List[str]:
pass
@require_tf
class _snake_case ( unittest.TestCase ):
@unittest.skip("Skip test until #12651 is resolved." )
@slow
def _lowerCamelCase ( self: int ) -> Tuple:
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103" )
# fmt: off
        input_ids = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] , dtype=tf.int32 )  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids , max_length=2_00 , do_sample=False )
        self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids )
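        # Sketch for readers (added note, not executed by the test): with
        # do_sample=False, `generate` decodes greedily, so the continuation is
        # deterministic and can be compared token-for-token:
        #     output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        #     # output_ids[0] reproduces the prompt followed by the expected ids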
| 360 | import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class _snake_case ( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__( self: List[str] , image_processor: str , tokenizer: List[str] , qformer_tokenizer: Optional[Any] ) -> Dict:
        super().__init__(image_processor , tokenizer )
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__( self: Any , images: ImageInput = None , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_token_type_ids: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs: Dict , ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text." )
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding.update(text_encoding )
            qformer_text_encoding = self.qformer_tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids" )
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask" )
        if images is not None:
            image_encoding = self.image_processor(images , return_tensors=return_tensors )
            encoding.update(image_encoding )
        return encoding
    def batch_decode( self: Any , *args: Any , **kwargs: Any ) -> Optional[Any]:
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self: Tuple , *args: Any , **kwargs: Dict ) -> Tuple:
        return self.tokenizer.decode(*args , **kwargs )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names( self: List[str] ) -> Tuple:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    def save_pretrained( self: Union[str, Any] , save_directory: Union[str, Any] , **kwargs: Optional[Any] ) -> str:
        if os.path.isfile(save_directory ):
            raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
        os.makedirs(save_directory , exist_ok=True )
        qformer_tokenizer_path = os.path.join(save_directory , "qformer_tokenizer" )
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path )
        return super().save_pretrained(save_directory , **kwargs )
@classmethod
    def from_pretrained( cls: Tuple , pretrained_model_name_or_path: Tuple , **kwargs: Optional[int] ) -> Union[str, Any]:
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path , subfolder="qformer_tokenizer" )
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path , **kwargs )
        args.append(qformer_tokenizer )
        return cls(*args )
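# Minimal usage sketch for the processor above (added illustration; the
# checkpoint name and the upstream class name InstructBlipProcessor are
# assumptions, not something this snippet defines):
#
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
#
# `inputs` then carries pixel_values from the image processor, input_ids and
# attention_mask from the main tokenizer, and qformer_input_ids plus
# qformer_attention_mask from the Q-Former tokenizer.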
| 342 | 0 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class _snake_case ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
def _lowerCamelCase ( self: Tuple ) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Any ) -> List[str]:
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
def _lowerCamelCase ( self: List[str] ) -> Dict:
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def _lowerCamelCase ( self: Any ) -> Any:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<pad>" )
        self.assertEqual(vocab_keys[1] , "<unk>" )
        self.assertEqual(vocab_keys[-1] , "▁eloquent" )
        self.assertEqual(len(vocab_keys ) , 3_00_00 )
def _lowerCamelCase ( self: Dict ) -> Tuple:
self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 )
def _lowerCamelCase ( self: Dict ) -> List[str]:
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
def _lowerCamelCase ( self: Tuple ) -> Tuple:
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁this", "▁is", "▁a", "▁test"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [48, 25, 21, 12_89] )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."] )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(ids , [31, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."] , )
def _lowerCamelCase ( self: int ) -> Optional[int]:
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB )
        text = tokenizer.encode("sequence builders" )
        text_a = tokenizer.encode("multi-sequence build" )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def _lowerCamelCase ( self: Dict ) -> Any:
# fmt: off
__UpperCAmelCase : List[Any] = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 2_19_70, 13, 5, 60_92, 1_67, 28, 71_03, 21_53, 6_73, 8, 70_28, 1_20_51, 18, 17, 71_03, 21_53, 6_73, 8, 35_15, 1_86_84, 8, 44_61, 6, 19_27, 2_97, 8, 1_20_60, 26_07, 18, 13, 5, 44_61, 15, 1_05_38, 38, 8, 1_35, 15, 8_22, 58, 15, 9_93, 1_03_63, 15, 14_60, 80_05, 44_61, 15, 9_93, 2_55, 23_28, 9, 9, 9, 6, 26, 11_12, 8_16, 32_60, 13, 5, 1_03, 23_77, 6, 17, 11_12, 8_16, 27_82, 13, 5, 1_03, 1_06_41, 6, 29, 84, 25_12, 24_30, 7_82, 1_86_84, 27_61, 19, 8_08, 24_30, 25_56, 17, 8_55, 14_80, 94_77, 40_91, 1_28, 1_17_12, 15, 71_03, 21_53, 6_73, 17, 2_48_83, 99_90, 9, 3], [2, 1_15_02, 25, 10_06, 20, 7_82, 8, 1_18_09, 8_55, 17_32, 1_93_93, 1_86_67, 37, 3_67, 2_10_18, 69, 18_54, 34, 1_18_60, 1_91_24, 27, 1_56, 2_25, 17, 1_93, 41_41, 19, 65, 91_24, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 22_31, 8_86, 23_85, 1_76_59, 84, 14, 1_67_92, 19_52, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__A , model_name="albert-base-v2" , revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e" , )
| 361 | import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
_snake_case = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
_snake_case = {'''facebook/blenderbot-3B''': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode( ) -> Dict:
    bs = (
        list(range(ord("!" ), ord("~" ) + 1 ) ) + list(range(ord("¡" ), ord("¬" ) + 1 ) ) + list(range(ord("®" ), ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs, cs ) )
def get_pairs( word ) -> Any:
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
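# Quick illustration (added example): get_pairs returns the set of adjacent
# symbol bigrams that bpe() repeatedly ranks to pick the next merge.
assert get_pairs(("l", "o", "w") ) == {("l", "o"), ("o", "w")}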
class _snake_case ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self: Tuple , vocab_file: Union[str, Any] , merges_file: Optional[int] , errors: List[str]="replace" , bos_token: List[str]="<s>" , eos_token: List[str]="</s>" , sep_token: str="</s>" , cls_token: Tuple="<s>" , unk_token: Optional[int]="<unk>" , pad_token: Any="<pad>" , mask_token: List[str]="<mask>" , add_prefix_space: List[str]=False , **kwargs: int , ) -> List[str]:
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding="utf-8" ) as merges_handle:
            bpe_merges = merges_handle.read().split("\n" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def _lowerCamelCase ( self: Dict ) -> Any:
return len(self.encoder )
def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self: int , token: List[Any] ) -> Union[str, Any]:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        self.cache[token] = word
        return word
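    # Worked example (added for clarity, assuming bpe_ranks = {("l", "o"): 0,
    # ("lo", "w"): 1}): bpe("low") rewrites "l o w" -> "lo w" -> "low", always
    # applying the lowest-ranked (earliest-learned) merge first, and caches the
    # final space-joined string so repeated tokens are resolved in O(1).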
    def _tokenize( self: Dict , text: Optional[Any] ) -> Dict:
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(" " ) )
        return bpe_tokens
def _lowerCamelCase ( self: int , __lowerCamelCase: str ) -> Dict:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[Any] ) -> List[str]:
return self.decoder.get(__lowerCamelCase )
    def convert_tokens_to_string( self: Any , tokens: Any ) -> int:
        text = "".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
        return text
    def save_vocabulary( self: List[Any] , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        index = 0
        with open(merge_file , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
    return vocab_file, merge_file
    def get_special_tokens_mask( self: Dict , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self: Tuple , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self: str , text: Optional[int] , is_split_into_words: List[str]=False , **kwargs: int ) -> List[Any]:
        add_prefix_space = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def build_inputs_with_special_tokens( self: List[str] , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        return token_ids_0 + [self.eos_token_id]
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: "Conversation" ) -> List[int]:
__UpperCAmelCase : Tuple = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = " ".join(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.encode(__lowerCamelCase )
if len(__lowerCamelCase ) > self.model_max_length:
__UpperCAmelCase : List[Any] = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
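# Usage sketch for the conversation builder above (added illustration; the
# checkpoint name is an assumption):
#
#   tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
#   input_ids = tokenizer._build_conversation_input_ids(conversation)
#
# Note the left truncation: when a chat grows past model_max_length, the oldest
# tokens are dropped so the most recent turns are what the model actually sees.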
| 342 | 0 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config( model_name, num_frames ) -> str:
    text_config = XCLIPTextConfig()
    # derive patch size from model name
    start_idx = model_name.find("patch" )
    patch_size = int(model_name[start_idx + len("patch" ) : start_idx + len("patch" ) + 2] )
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames )
    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072
    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336
    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config )
    if "large" in model_name:
        config.projection_dim = 768
    return config
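# Example of the name parsing above (added note): for "xclip-base-patch16",
# find("patch") locates the suffix and the next two characters parse to
# patch_size == 16; the frame count is supplied separately by the caller.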
def rename_key( name ) -> List[Any]:
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight" )
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight" )
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1" )
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2" )
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1" )
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2" )
    if name.startswith("transformer.resblocks" ):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers" )
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj" )
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm" )
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding" )
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight" )
    if name.startswith("visual.transformer.resblocks" ):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers" )
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding" )
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm" )
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm" )
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight" )
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight" )
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection" )
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm" )
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position" )
    if name.startswith("mit.resblocks" ):
        name = name.replace("mit.resblocks", "mit.encoder.layers" )
    # prompts generator
    if name.startswith("prompts_generator.norm" ):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm" )
    return name
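# Example mapping (added note, traced through the rules above):
#   rename_key("visual.transformer.resblocks.0.ln_1.weight")
#   == "vision_model.encoder.layers.0.layer_norm1.weight"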
def convert_state_dict( orig_state_dict, config ) -> Optional[int]:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "attn.in_proj" in key:
            key_split = key.split("." )
            if key.startswith("visual" ):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__UpperCAmelCase : int = val[
:dim, :
]
__UpperCAmelCase : List[str] = val[
dim : dim * 2, :
]
__UpperCAmelCase : Dict = val[
-dim:, :
]
else:
__UpperCAmelCase : str = val[
:dim
]
__UpperCAmelCase : int = val[
dim : dim * 2
]
__UpperCAmelCase : Optional[int] = val[
-dim:
]
else:
if "weight" in key:
__UpperCAmelCase : Optional[Any] = val[
:dim, :
]
__UpperCAmelCase : Optional[Any] = val[
dim : dim * 2, :
]
__UpperCAmelCase : int = val[
-dim:, :
]
else:
__UpperCAmelCase : Union[str, Any] = val[:dim]
__UpperCAmelCase : str = val[
dim : dim * 2
]
__UpperCAmelCase : Dict = val[-dim:]
elif key.startswith("mit" ):
__UpperCAmelCase : int = key_split[2]
__UpperCAmelCase : Optional[Any] = config.vision_config.mit_hidden_size
if "weight" in key:
__UpperCAmelCase : Union[str, Any] = val[:dim, :]
__UpperCAmelCase : Any = val[dim : dim * 2, :]
__UpperCAmelCase : Any = val[-dim:, :]
else:
__UpperCAmelCase : List[Any] = val[:dim]
__UpperCAmelCase : List[Any] = val[dim : dim * 2]
__UpperCAmelCase : List[Any] = val[-dim:]
        else:
            layer_num = key_split[2]
            dim = config.text_config.hidden_size
if "weight" in key:
__UpperCAmelCase : int = val[:dim, :]
__UpperCAmelCase : int = val[
dim : dim * 2, :
]
__UpperCAmelCase : Tuple = val[-dim:, :]
else:
__UpperCAmelCase : str = val[:dim]
__UpperCAmelCase : Union[str, Any] = val[
dim : dim * 2
]
__UpperCAmelCase : Optional[int] = val[-dim:]
        else:
            new_key_name = rename_key(key )
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val
return orig_state_dict
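# What the splitting above does (added note): CLIP-style checkpoints store the
# attention projections as a single fused in_proj tensor of shape (3*dim, dim).
# Rows [:dim] become the query projection, rows [dim : dim * 2] the key
# projection, and rows [-dim:] the value projection (likewise for the fused
# (3*dim,) bias vector).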
def prepare_video( num_frames ) -> Union[str, Any]:
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset", )
    video = np.load(file )
    return list(video )
def convert_xclip_checkpoint( model_name, pytorch_dump_folder_path=None, push_to_hub=False ) -> Union[str, Any]:
    model_to_url = {
# fully supervised kinetics-400 checkpoints
"xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
"xclip-base-patch32-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
),
"xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
"xclip-base-patch16-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
),
"xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
"xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
# fully supervised kinetics-600 checkpoints
"xclip-base-patch16-kinetics-600": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
),
"xclip-base-patch16-kinetics-600-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
),
"xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
# few shot
"xclip-base-patch16-hmdb-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
),
"xclip-base-patch16-hmdb-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
),
"xclip-base-patch16-hmdb-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
),
"xclip-base-patch16-hmdb-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
),
"xclip-base-patch16-ucf-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
),
"xclip-base-patch16-ucf-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
),
"xclip-base-patch16-ucf-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
),
"xclip-base-patch16-ucf-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
),
# zero shot
"xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32
    config = get_xclip_config(model_name, num_frames )
    model = XCLIPModel(config )
    model.eval()
if "drive" in checkpoint_url:
__UpperCAmelCase : Dict = "pytorch_model.bin"
gdown.cached_download(__snake_case, __snake_case, quiet=__snake_case )
__UpperCAmelCase : str = torch.load(__snake_case, map_location="cpu" )["model"]
else:
__UpperCAmelCase : List[Any] = torch.hub.load_state_dict_from_url(__snake_case )["model"]
__UpperCAmelCase : str = convert_state_dict(__snake_case, __snake_case )
__UpperCAmelCase : Optional[int] = XCLIPModel(__snake_case )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = model.load_state_dict(__snake_case, strict=__snake_case )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
    image_size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=image_size )
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32" )
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32" )
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer )
    video = prepare_video(num_frames )
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True )
    print("Shape of pixel values:", inputs.pixel_values.shape )
    with torch.no_grad():
        outputs = model(**inputs )
    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1 )
    print("Probs:", probs )
# kinetics-400
if model_name == "xclip-base-patch32":
__UpperCAmelCase : Tuple = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
__UpperCAmelCase : List[str] = torch.tensor([[7.0_9_9_9e-0_4, 9.9_8_8_3e-0_1, 4.5_5_8_0e-0_4]] )
elif model_name == "xclip-base-patch16":
__UpperCAmelCase : str = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
__UpperCAmelCase : Optional[int] = torch.tensor([[7.6_9_3_7e-0_4, 9.9_7_2_8e-0_1, 1.9_4_7_3e-0_3]] )
elif model_name == "xclip-large-patch14":
__UpperCAmelCase : Optional[int] = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
__UpperCAmelCase : Tuple = torch.tensor([[3.3_8_7_7e-0_4, 9.9_9_3_7e-0_1, 2.8_8_8_8e-0_4]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__UpperCAmelCase : Dict = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__UpperCAmelCase : Tuple = torch.tensor([[3.8_5_5_4e-0_4, 9.9_9_2_9e-0_1, 3.2_7_5_4e-0_4]] )
elif model_name == "xclip-large-patch14-kinetics-600":
__UpperCAmelCase : List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__UpperCAmelCase : Any = torch.tensor([[7.1_8_9_0e-0_6, 9.9_9_9_4e-0_1, 5.6_5_5_9e-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__UpperCAmelCase : Optional[Any] = torch.tensor([[1.0_3_2_0e-0_5, 9.9_9_9_3e-0_1, 6.2_4_3_5e-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__UpperCAmelCase : Union[str, Any] = torch.tensor([[4.1_3_7_7e-0_6, 9.9_9_9_0e-0_1, 9.8_3_8_6e-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__UpperCAmelCase : Tuple = torch.tensor([[4.1_3_4_7e-0_5, 9.9_9_6_2e-0_1, 3.3_4_1_1e-0_4]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
__UpperCAmelCase : Dict = torch.tensor([[8.5_8_5_7e-0_5, 9.9_9_2_8e-0_1, 6.3_2_9_1e-0_4]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
__UpperCAmelCase : Dict = torch.tensor([[8.5_8_5_7e-0_5, 9.9_9_2_8e-0_1, 6.3_2_9_1e-0_4]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
__UpperCAmelCase : Optional[Any] = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
__UpperCAmelCase : Dict = torch.tensor([[9.8_2_1_9e-0_4, 9.9_5_9_3e-0_1, 3.0_8_6_3e-0_3]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__UpperCAmelCase : List[str] = torch.tensor([[3.5_0_8_2e-0_4, 9.9_7_8_5e-0_1, 1.7_9_6_6e-0_3]] )
else:
raise ValueError(f'''Model name {model_name} not supported''' )
assert torch.allclose(__snake_case, __snake_case, atol=1e-3 )
print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub..." )
        model.push_to_hub(model_name, organization="nielsr" )
        processor.push_to_hub(model_name, organization="nielsr" )
        slow_tokenizer.push_to_hub(model_name, organization="nielsr" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''xclip-base-patch32''',
type=str,
help='''Name of the model.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_snake_case = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 362 | import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
super().setUp()
        tokenizer = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowerCamelCase ( self: Union[str, Any] ) -> List[Any]:
return CanineTokenizer.from_pretrained("google/canine-s" )
def _lowerCamelCase ( self: Any , **__lowerCamelCase: List[Any] ) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
        tokenizer.model_max_length = 10_24
        return tokenizer
@require_torch
def _lowerCamelCase ( self: List[str] ) -> int:
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text , padding=True , return_tensors="pt" )
        self.assertIsInstance(batch , BatchEncoding )
        result = list(batch.input_ids.numpy()[0] )
        self.assertListEqual(expected_src_tokens , result )
        self.assertEqual((2, 39) , batch.input_ids.shape )
        self.assertEqual((2, 39) , batch.attention_mask.shape )
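        # Added note: CANINE tokenizes at the character level, so the expected
        # ids above are plain Unicode code points (ord("L") == 76, ord("i") == 105,
        # ...) wrapped in the private-use specials 0xE000 == 57344 (CLS) and
        # 0xE001 == 57345 (SEP), with 0 used for padding.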
@require_torch
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Transformers."]
        batch = tokenizer(src_text , padding=True , return_tensors="pt" )
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids" , batch )
        self.assertIn("attention_mask" , batch )
        self.assertIn("token_type_ids" , batch )
@require_torch
def _lowerCamelCase ( self: Any ) -> List[str]:
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weather?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text , max_length=32 , padding="max_length" , truncation=True , return_tensors="pt" )
        self.assertEqual(32 , targets["input_ids"].shape[1] )
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
# safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
        tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text , add_special_tokens=False )
                tokenizer.save_pretrained(tmpdirname )
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname )
                after_tokens = after_tokenizer.encode(sample_text , add_special_tokens=False )
                self.assertListEqual(before_tokens , after_tokens )
                shutil.rmtree(tmpdirname )
        tokenizers = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                additional_special_tokens = tokenizer.additional_special_tokens
                # We can add a new special token for Canine as follows:
                new_additional_special_token = chr(0xE_0_0_7 )
                additional_special_tokens.append(new_additional_special_token )
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
                before_tokens = tokenizer.encode(sample_text , add_special_tokens=False )
                tokenizer.save_pretrained(tmpdirname )
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname )
                after_tokens = after_tokenizer.encode(sample_text , add_special_tokens=False )
                self.assertListEqual(before_tokens , after_tokens )
                self.assertIn(new_additional_special_token , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(tmpdirname )
def _lowerCamelCase ( self: str ) -> Optional[int]:
__UpperCAmelCase : List[Any] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.get_clean_sequence(__lowerCamelCase )
# a special token for Canine can be defined as follows:
__UpperCAmelCase : int = 0xE_0_0_5
__UpperCAmelCase : Tuple = chr(__lowerCamelCase )
tokenizer.add_special_tokens({"cls_token": special_token} )
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , 1 )
__UpperCAmelCase : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : Dict = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : int = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , input_encoded + special_token_id )
__UpperCAmelCase : Optional[int] = tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
self.assertTrue(special_token not in decoded )
def _lowerCamelCase ( self: Optional[int] ) -> Optional[Any]:
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                SPECIAL_TOKEN_1 = chr(0xE_0_0_5 )
                SPECIAL_TOKEN_2 = chr(0xE_0_0_6 )
                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=True )
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1 )
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2 )
                self.assertEqual(len(token_1 ) , 1 )
                self.assertEqual(len(token_2 ) , 1 )
                self.assertEqual(token_1[0] , SPECIAL_TOKEN_1 )
                self.assertEqual(token_2[0] , SPECIAL_TOKEN_2 )
@require_tokenizers
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE_0_0_6
                new_token = chr(NEW_TOKEN )
                new_token = AddedToken(new_token , lstrip=True )
                tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name )
                    tokenizer.from_pretrained(tmp_dir_name )
def _lowerCamelCase ( self: Dict ) -> List[str]:
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir )
                with open(os.path.join(tmp_dir , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
                    special_tokens_map = json.load(json_file )
                with open(os.path.join(tmp_dir , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
                    tokenizer_config = json.load(json_file )
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE_0_0_6
                new_token_1 = chr(NEW_TOKEN )
                special_tokens_map["additional_special_tokens"] = [new_token_1]
                tokenizer_config["additional_special_tokens"] = [new_token_1]
                with open(os.path.join(tmp_dir , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(special_tokens_map , outfile )
                with open(os.path.join(tmp_dir , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(tokenizer_config , outfile )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir , extra_ids=0 )
                self.assertIn(new_token_1 , tokenizer_without_change_in_init.additional_special_tokens )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_1] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1] ) ) , )
                NEW_TOKEN = 0xE_0_0_7
                new_token_2 = chr(NEW_TOKEN )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_2 , lstrip=True )]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir , additional_special_tokens=new_added_tokens , extra_ids=0 )
                self.assertIn(new_token_2 , tokenizer.additional_special_tokens )
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_2] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2] ) ) )
@require_tokenizers
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                input = "hello world"
                if self.space_between_special_tokens:
                    output = "[CLS] hello world [SEP]"
                else:
                    output = input
                encoded = tokenizer.encode(input , add_special_tokens=False )
                decoded = tokenizer.decode(encoded , spaces_between_special_tokens=self.space_between_special_tokens )
                self.assertIn(decoded , [output, output.lower()] )
def _lowerCamelCase ( self: Dict ) -> Any:
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : List[str] = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
__UpperCAmelCase : List[str] = "a"
__UpperCAmelCase : Any = ord(__lowerCamelCase )
for attr in attributes_list:
setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase )
setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase )
setattr(__lowerCamelCase , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [] )
__UpperCAmelCase : Tuple = 0xE_0_0_6
__UpperCAmelCase : Optional[Any] = chr(__lowerCamelCase )
setattr(__lowerCamelCase , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
pass
def _lowerCamelCase ( self: Any ) -> Any:
pass
def _lowerCamelCase ( self: Union[str, Any] ) -> Tuple:
pass
def _lowerCamelCase ( self: Optional[int] ) -> Any:
pass
def _lowerCamelCase ( self: List[str] ) -> str:
pass
def _lowerCamelCase ( self: Union[str, Any] ) -> Optional[int]:
pass
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
pass
def _lowerCamelCase ( self: str ) -> Tuple:
pass
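# A quick stdlib-only illustration of the convention used in the tests above: special
# tokens are drawn from the Unicode Private Use Area (U+E000-U+F8FF), so for a
# character-level tokenizer like CANINE the token id can simply be the codepoint itself.
new_token = chr(0xE006)
assert 0xE000 <= ord(new_token) <= 0xF8FF  # inside the Private Use Area
assert ord(new_token) == 0xE006            # char-level id == codepoint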
| 342 | 0 |
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()
    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f'''bert/{name}'''
    def create_tf_var(tensor, name, session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f'''Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}''')
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir, )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
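# A self-contained re-run of the substitution table in to_tf_var_name on one sample
# key, showing the PyTorch -> TF1 naming translation (the sample key is illustrative):
var_map = (
    ("layer.", "layer_"),
    ("word_embeddings.weight", "word_embeddings"),
    ("position_embeddings.weight", "position_embeddings"),
    ("token_type_embeddings.weight", "token_type_embeddings"),
    (".", "/"),
    ("LayerNorm/weight", "LayerNorm/gamma"),
    ("LayerNorm/bias", "LayerNorm/beta"),
    ("weight", "kernel"),
)
name = "encoder.layer.0.attention.self.query.weight"
for patt, repl in var_map:
    name = name.replace(patt, repl)
print("bert/" + name)  # -> bert/encoder/layer_0/attention/self/query/kernel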
| 363 | import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)
    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()
def get_logger(name, log_level=None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
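# Typical use of the adapter above, assuming the public Accelerate API: the extra
# `main_process_only` / `in_order` kwargs are consumed by MultiProcessAdapter.log.
# A minimal sketch -- the accelerate state must be initialized first:
from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()  # initializes PartialState
logger = get_logger(__name__)
logger.info("printed once, on the main process only")
logger.info("printed by every rank, in rank order", main_process_only=False, in_order=True)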
| 342 | 0 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor):
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, )
    def enable_attention_slicing(self, slice_size="auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)
    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
def __call__( self: int , __lowerCamelCase: Tuple , __lowerCamelCase: Tuple = 5_12 , __lowerCamelCase: Optional[Any] = 5_12 , __lowerCamelCase: int = 50 , __lowerCamelCase: int = 7.5 , __lowerCamelCase: List[str] = None , __lowerCamelCase: List[Any] = 1 , __lowerCamelCase: List[str] = 0.0 , __lowerCamelCase: str = None , __lowerCamelCase: Dict = None , __lowerCamelCase: int = "pil" , __lowerCamelCase: Optional[Any] = True , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: Dict = 1 , __lowerCamelCase: Optional[int] = None , **__lowerCamelCase: Union[str, Any] , ) -> Tuple:
if isinstance(_A , _A ):
__UpperCAmelCase : Optional[int] = 1
elif isinstance(_A , _A ):
__UpperCAmelCase : str = len(_A )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(_A )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_A , _A ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(_A )}.''' )
# get prompt text embeddings
__UpperCAmelCase : str = self.tokenizer(
_A , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
__UpperCAmelCase : List[str] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__UpperCAmelCase : Any = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
__UpperCAmelCase : Dict = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__UpperCAmelCase : Dict = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = text_embeddings.shape
__UpperCAmelCase : Dict = text_embeddings.repeat(1 , _A , 1 )
__UpperCAmelCase : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , _A , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__UpperCAmelCase : Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__UpperCAmelCase : List[str] = 42
if negative_prompt is None:
__UpperCAmelCase : List[Any] = [""]
elif type(_A ) is not type(_A ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(_A )} !='''
f''' {type(_A )}.''' )
elif isinstance(_A , _A ):
__UpperCAmelCase : Any = [negative_prompt]
elif batch_size != len(_A ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(_A )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
" the batch size of `prompt`." )
else:
__UpperCAmelCase : str = negative_prompt
__UpperCAmelCase : int = text_input_ids.shape[-1]
__UpperCAmelCase : Tuple = self.tokenizer(
_A , padding="max_length" , max_length=_A , truncation=_A , return_tensors="pt" , )
__UpperCAmelCase : List[str] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__UpperCAmelCase : Optional[int] = uncond_embeddings.shape[1]
__UpperCAmelCase : Optional[int] = uncond_embeddings.repeat(_A , _A , 1 )
__UpperCAmelCase : Dict = uncond_embeddings.view(batch_size * num_images_per_prompt , _A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__UpperCAmelCase : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__UpperCAmelCase : List[str] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__UpperCAmelCase : Optional[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__UpperCAmelCase : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__UpperCAmelCase : List[Any] = torch.randn(
_A , generator=_A , device="cpu" , dtype=_A ).to(self.device )
__UpperCAmelCase : int = torch.randn(_A , generator=_A , device="cpu" , dtype=_A ).to(
self.device )
else:
__UpperCAmelCase : List[str] = torch.randn(
_A , generator=_A , device=self.device , dtype=_A )
__UpperCAmelCase : Tuple = torch.randn(_A , generator=_A , device=self.device , dtype=_A )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__UpperCAmelCase : Any = latents_reference.to(self.device )
__UpperCAmelCase : Optional[int] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__UpperCAmelCase : Dict = (latents_shape[3] - latents_shape_reference[3]) // 2
__UpperCAmelCase : Tuple = (latents_shape[2] - latents_shape_reference[2]) // 2
__UpperCAmelCase : List[Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__UpperCAmelCase : Union[str, Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__UpperCAmelCase : List[str] = 0 if dx < 0 else dx
__UpperCAmelCase : List[str] = 0 if dy < 0 else dy
__UpperCAmelCase : List[str] = max(-dx , 0 )
__UpperCAmelCase : int = max(-dy , 0 )
# import pdb
# pdb.set_trace()
__UpperCAmelCase : Optional[int] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(_A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__UpperCAmelCase : int = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__UpperCAmelCase : Tuple = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__UpperCAmelCase : Dict = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__UpperCAmelCase : List[str] = {}
if accepts_eta:
__UpperCAmelCase : Union[str, Any] = eta
for i, t in enumerate(self.progress_bar(_A ) ):
# expand the latents if we are doing classifier free guidance
__UpperCAmelCase : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__UpperCAmelCase : Optional[int] = self.scheduler.scale_model_input(_A , _A )
# predict the noise residual
__UpperCAmelCase : Optional[int] = self.unet(_A , _A , encoder_hidden_states=_A ).sample
# perform guidance
if do_classifier_free_guidance:
__UpperCAmelCase , __UpperCAmelCase : str = noise_pred.chunk(2 )
__UpperCAmelCase : Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__UpperCAmelCase : Any = self.scheduler.step(_A , _A , _A , **_A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_A , _A , _A )
__UpperCAmelCase : List[str] = 1 / 0.1_82_15 * latents
__UpperCAmelCase : str = self.vae.decode(_A ).sample
__UpperCAmelCase : int = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__UpperCAmelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__UpperCAmelCase : Tuple = self.feature_extractor(self.numpy_to_pil(_A ) , return_tensors="pt" ).to(
self.device )
__UpperCAmelCase , __UpperCAmelCase : List[str] = self.safety_checker(
images=_A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__UpperCAmelCase : Any = None
if output_type == "pil":
__UpperCAmelCase : Tuple = self.numpy_to_pil(_A )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=_A , nsfw_content_detected=_A )
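# A hedged usage sketch of the seed-resize idea implemented above: noise generated on
# the 64x64 reference latent grid is passed back in so that one seed keeps a similar
# composition at a new output size. The checkpoint id and keyword names below are
# illustrative reconstructions, not guaranteed by this file:
# pipe = SeedResizeStableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
# generator = torch.Generator("cuda").manual_seed(0)
# reference = torch.randn((1, pipe.unet.config.in_channels, 64, 64), generator=generator)
# image = pipe("a photo of a cat", height=512, width=768,
#              latents_reference=reference, generator=generator).images[0]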
| 364 | from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
def __init__( self: Optional[Any] , __lowerCamelCase: NestedDataStructureLike[PathLike] , __lowerCamelCase: Optional[NamedSplit] = None , __lowerCamelCase: Optional[Features] = None , __lowerCamelCase: str = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: Optional[int] = None , **__lowerCamelCase: Tuple , ) -> str:
super().__init__(
__lowerCamelCase , split=__lowerCamelCase , features=__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase , streaming=__lowerCamelCase , num_proc=__lowerCamelCase , **__lowerCamelCase , )
__UpperCAmelCase : Union[str, Any] = path_or_paths if isinstance(__lowerCamelCase , __lowerCamelCase ) else {self.split: path_or_paths}
__UpperCAmelCase : int = Text(
cache_dir=__lowerCamelCase , data_files=__lowerCamelCase , features=__lowerCamelCase , **__lowerCamelCase , )
def read(self):
# Build iterable dataset
if self.streaming:
__UpperCAmelCase : List[str] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__UpperCAmelCase : Any = None
__UpperCAmelCase : Any = None
__UpperCAmelCase : Dict = None
__UpperCAmelCase : str = None
self.builder.download_and_prepare(
download_config=__lowerCamelCase , download_mode=__lowerCamelCase , verification_mode=__lowerCamelCase , base_path=__lowerCamelCase , num_proc=self.num_proc , )
__UpperCAmelCase : Dict = self.builder.as_dataset(
split=self.split , verification_mode=__lowerCamelCase , in_memory=self.keep_in_memory )
return dataset
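# End users normally reach this reader through the packaged "text" builder. A minimal
# sketch with the public datasets API (the file path is a placeholder):
from datasets import load_dataset

dataset = load_dataset("text", data_files={"train": "train.txt"}, split="train")
print(dataset[0]["text"])  # each line of train.txt becomes one example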
| 342 | 0 |
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists
def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f'''Invalid weight of {weight:f} provided'''
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists
def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
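# A worked example: three vehicles scored on price (weight 0: lower is better),
# damage (weight 0) and year (weight 1: higher is better). Each column is min-max
# normalized, per-column scores are summed per row, and the total is appended:
vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
print(procentual_proximity(vehicles, [0, 0, 1]))
# -> [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]]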
| 365 | from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
'''configuration_trajectory_transformer''': [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TrajectoryTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrajectoryTransformerModel''',
'''TrajectoryTransformerPreTrainedModel''',
'''load_tf_weights_in_trajectory_transformer''',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
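# The _LazyModule above defers the heavy torch imports until an attribute is touched.
# The mechanism can be sketched with a module-level __getattr__ (PEP 562); this is an
# illustration of the idea, not the library's exact implementation:
# import importlib
# def __getattr__(name):
#     for submodule, symbols in _import_structure.items():
#         if name in symbols:
#             module = importlib.import_module("." + submodule, __name__)
#             return getattr(module, name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")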
| 342 | 0 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
_snake_case = "."
if __name__ == "__main__":
_snake_case = os.path.join(REPO_PATH, '''utils/documentation_tests.txt''')
_snake_case = []
_snake_case = []
with open(doctest_file_path) as fp:
for line in fp:
_snake_case = line.strip()
_snake_case = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
_snake_case = "\n".join(non_existent_paths)
raise ValueError(F'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError('''Files in `utils/documentation_tests.txt` are not in alphabetical order.''')
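# The second check only reports misordering; a small convenience sketch (not part of
# the original script) that would rewrite the list in sorted order:
# with open(doctest_file_path) as fp:
#     lines = [line.strip() for line in fp if line.strip()]
# with open(doctest_file_path, "w") as fp:
#     fp.write("\n".join(sorted(lines)) + "\n")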
| 366 | import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
def __init__( self: Tuple , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any]=13 , __lowerCamelCase: Optional[int]=32 , __lowerCamelCase: List[str]=3 , __lowerCamelCase: Dict=4 , __lowerCamelCase: Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase: int=[2, 2, 3, 2] , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Tuple=37 , __lowerCamelCase: Tuple="gelu" , __lowerCamelCase: List[Any]=10 , __lowerCamelCase: Optional[int]=0.02 , __lowerCamelCase: Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase: Optional[int]=[2, 3, 4] , __lowerCamelCase: int=None , ) -> List[str]:
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : Optional[int] = image_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : Union[str, Any] = num_stages
__UpperCAmelCase : List[str] = hidden_sizes
__UpperCAmelCase : Any = depths
__UpperCAmelCase : Optional[int] = is_training
__UpperCAmelCase : List[Any] = use_labels
__UpperCAmelCase : Optional[int] = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Union[str, Any] = num_labels
__UpperCAmelCase : Any = initializer_range
__UpperCAmelCase : List[str] = out_features
__UpperCAmelCase : Tuple = out_indices
__UpperCAmelCase : List[Any] = scope
def _lowerCamelCase ( self: List[Any] ) -> Optional[int]:
__UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : List[str] = None
if self.use_labels:
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self: Tuple ) -> List[Any]:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase : Optional[Any] = ConvNextVaModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : List[str] = model(__lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: Tuple ) -> Tuple:
__UpperCAmelCase : Union[str, Any] = ConvNextVaForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Optional[int] = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self: int , __lowerCamelCase: Any , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Any = model(__lowerCamelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__UpperCAmelCase : List[Any] = None
__UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Any = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowerCamelCase ( self: int ) -> List[str]:
__UpperCAmelCase : int = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs
__UpperCAmelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs
__UpperCAmelCase : Dict = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowerCamelCase__: Dict = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__: str = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__: Tuple = False
lowerCamelCase__: int = False
lowerCamelCase__: Dict = False
lowerCamelCase__: int = False
lowerCamelCase__: Any = False
def _lowerCamelCase ( self: Union[str, Any] ) -> Union[str, Any]:
__UpperCAmelCase : Union[str, Any] = ConvNextVaModelTester(self )
__UpperCAmelCase : str = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def _lowerCamelCase ( self: Dict ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self: List[Any] ) -> int:
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def _lowerCamelCase ( self: Any ) -> Any:
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def _lowerCamelCase ( self: str ) -> Optional[Any]:
pass
def _lowerCamelCase ( self: List[Any] ) -> int:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_with_labels()
__UpperCAmelCase : Optional[Any] = True
if model_class.__name__ in [
    *get_values(MODEL_MAPPING_NAMES ),
    *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
]:
continue
__UpperCAmelCase : Optional[Any] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
__UpperCAmelCase : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
__UpperCAmelCase : Any = model(**__lowerCamelCase ).loss
loss.backward()
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = True
if (
    model_class.__name__
    in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
    or not model_class.supports_gradient_checkpointing
):
continue
__UpperCAmelCase : int = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.gradient_checkpointing_enable()
model.train()
__UpperCAmelCase : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
__UpperCAmelCase : Any = model(**__lowerCamelCase ).loss
loss.backward()
def _lowerCamelCase ( self: List[str] ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : str = model_class(__lowerCamelCase )
__UpperCAmelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : List[Any] = [*signature.parameters.keys()]
__UpperCAmelCase : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _lowerCamelCase ( self: str ) -> List[Any]:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowerCamelCase ( self: Union[str, Any] ) -> Dict:
def check_hidden_states_output(__lowerCamelCase: Any , __lowerCamelCase: Tuple , __lowerCamelCase: str ):
__UpperCAmelCase : Any = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Tuple = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
__UpperCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[int] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Any = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def _lowerCamelCase ( self: Dict ) -> List[Any]:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Optional[int] = ConvNextVaModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def prepare_img():
__UpperCAmelCase : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
@cached_property
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
__UpperCAmelCase : List[Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCamelCase )
__UpperCAmelCase : List[str] = self.default_image_processor
__UpperCAmelCase : Optional[Any] = prepare_img()
__UpperCAmelCase : int = preprocessor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
__UpperCAmelCase : str = model(**__lowerCamelCase )
# verify the logits
__UpperCAmelCase : Dict = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
__UpperCAmelCase : str = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
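# The integration test above, reduced to plain inference. The mangled "ConvNextVa"
# names in this file correspond to the public ConvNeXT V2 classes; the image path is
# a placeholder:
import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])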
| 342 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
def _lowerCamelCase ( self: str ) -> Dict:
torch.manual_seed(0 )
__UpperCAmelCase : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
__UpperCAmelCase : List[str] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=__lowercase , set_alpha_to_one=__lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCAmelCase : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__UpperCAmelCase : Tuple = CLIPTextModel(__lowercase )
__UpperCAmelCase : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__UpperCAmelCase : int = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def get_dummy_inputs(self, device, seed=0 ):
    if str(device ).startswith("mps" ):
        generator = torch.manual_seed(seed )
    else:
        generator = torch.Generator(device=device ).manual_seed(seed )
    inputs = {
        "prompt": ".",
        "generator": generator,
        "num_inference_steps": 2,
        "guidance_scale": 1.0,
        "sag_scale": 1.0,
        "output_type": "numpy",
    }
    return inputs
def _lowerCamelCase ( self: str ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
def _lowerCamelCase ( self: Tuple ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
__UpperCAmelCase : str = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
__UpperCAmelCase : Tuple = sag_pipe.to(__lowercase )
sag_pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : Optional[Any] = '''.'''
__UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
__UpperCAmelCase : List[str] = sag_pipe(
[prompt] , generator=__lowercase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" )
__UpperCAmelCase : List[Any] = output.images
__UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase : Any = np.array([0.15_68, 0.17_38, 0.16_95, 0.16_93, 0.15_07, 0.17_05, 0.15_47, 0.17_51, 0.19_49] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
__UpperCAmelCase : Optional[int] = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
__UpperCAmelCase : List[str] = sag_pipe.to(__lowercase )
sag_pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : Dict = '''.'''
__UpperCAmelCase : List[Any] = torch.manual_seed(0 )
__UpperCAmelCase : Union[str, Any] = sag_pipe(
[prompt] , generator=__lowercase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" )
__UpperCAmelCase : List[str] = output.images
__UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase : Optional[int] = np.array([0.34_59, 0.28_76, 0.25_37, 0.30_02, 0.26_71, 0.21_60, 0.30_26, 0.22_62, 0.23_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def _lowerCamelCase ( self: str ) -> Dict:
__UpperCAmelCase : Dict = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
__UpperCAmelCase : Optional[Any] = sag_pipe.to(__lowercase )
sag_pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : Union[str, Any] = '''.'''
__UpperCAmelCase : int = torch.manual_seed(0 )
__UpperCAmelCase : Any = sag_pipe(
[prompt] , width=7_68 , height=5_12 , generator=__lowercase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" , )
__UpperCAmelCase : Optional[int] = output.images
assert image.shape == (1, 5_12, 7_68, 3)
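# The tests above exercise Self-Attention Guidance through the public diffusers API;
# sag_scale=0.0 disables SAG and larger values strengthen it. A minimal sketch:
import torch
from diffusers import StableDiffusionSAGPipeline

pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
image = pipe("a photo of an astronaut", sag_scale=0.75,
             generator=torch.manual_seed(0)).images[0]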
| 367 | import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=1_00, encoder_layers=6, encoder_ffn_dim=20_48, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=20_48, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=2_56, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            dilation , backbone , use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads
    @property
    def hidden_size(self) -> int:
        return self.d_model
    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs ):
        return cls(backbone_config=backbone_config , **kwargs )
    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ] )
    @property
    def atol_for_validation(self) -> float:
        return 1e-5
    @property
    def default_onnx_opset(self) -> int:
        return 12
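# A short usage sketch of the config above: attribute_map lets hidden_size and
# num_attention_heads resolve to d_model and encoder_attention_heads.
from transformers import DetrConfig, DetrForObjectDetection

config = DetrConfig(num_queries=50, d_model=256)
model = DetrForObjectDetection(config)  # randomly initialized, not pretrained
assert config.hidden_size == config.d_model == 256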
| 342 | 0 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0" )
    if not scores:
        raise ValueError("Scores cannot be empty" )
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height ), minimax(depth + 1, node_index * 2 + 1, False, scores, height ), )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height ), minimax(depth + 1, node_index * 2 + 1, True, scores, height ), )
    )
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ), 2 )
    print(f'''Optimal value : {minimax(0, 0, True, scores, height )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
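# A worked trace for the scores above (height = log2(8) = 3, maximizer moves first):
#   depth 3 (leaves):  90 23 | 6 33 | 21 65 | 123 34423
#   depth 2 (max):       90  |  33  |   65  |   34423
#   depth 1 (min):          33      |       65
#   depth 0 (max):                 65   -> prints "Optimal value : 65"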
| 368 | from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12 ):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1 ), a_min=eps ) ).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1 ), a_min=eps ) ).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T )
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config )
        self.visual_projection = nn.Dense(self.config.projection_dim , use_bias=False , dtype=self.dtype )
        self.concept_embeds = self.param("concept_embeds" , jax.nn.initializers.ones , (17, self.config.projection_dim) )
        self.special_care_embeds = self.param(
            "special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim) )
        self.concept_embeds_weights = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (17,) )
        self.special_care_embeds_weights = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,) )
    def __call__(self, clip_input ):
        pooled_output = self.vision_model(clip_input )[1]
        image_embeds = self.visual_projection(pooled_output )
        special_cos_dist = jax_cosine_distance(image_embeds , self.special_care_embeds )
        cos_dist = jax_cosine_distance(image_embeds , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores , 3 )
        is_special_care = jnp.any(special_scores > 0 , axis=1 , keepdims=True )
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01
        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores , 3 )
        has_nsfw_concepts = jnp.any(concept_scores > 0 , axis=1 )
        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule
    def __init__(self, config: CLIPConfig, input_shape: Optional[Tuple] = None, seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config , dtype=dtype , **kwargs )
        super().__init__(config , module , input_shape=input_shape , seed=seed , dtype=dtype , _do_init=_do_init )
    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None ) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng , input_shape )
        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {"params": params_rng, "dropout": dropout_rng}
        random_params = self.module.init(rngs , clip_input )["params"]
        return random_params
    def __call__(self, clip_input, params: dict = None ):
        clip_input = jnp.transpose(clip_input , (0, 2, 3, 1) )
        return self.module.apply(
            {"params": params or self.params} , jnp.array(clip_input , dtype=jnp.float32 ) , rngs={} , )
| 342 | 0 |
"""simple docstring"""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"
    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs ):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs )
class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig
    def __init__(self, config, *args, **kwargs ):
        super().__init__(config , *args , **kwargs )
        self.transformer = XLMRobertaModel(config )
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions , out_features=config.numDims )
    def forward(self, input_ids, attention_mask ):
        embs = self.transformer(input_ids=input_ids , attention_mask=attention_mask )[0]
        embs2 = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
        return self.LinearTransformation(embs2 ), embs
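# The forward pass above is masked mean pooling: padding positions are zeroed before
# averaging over the sequence. The same computation on a toy batch:
import torch

embs = torch.ones(1, 3, 4)                        # (batch, seq, hidden)
attention_mask = torch.tensor([[1.0, 1.0, 0.0]])  # last position is padding
pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
print(pooled.shape)  # torch.Size([1, 4]); the padded position did not dilute the mean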
| 369 | import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name ):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024
    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset" ), "r" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"] )
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, idalabel=idalabel, labelaid=labelaid, )
    return config
def create_rename_keys(config ):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
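# Helper: move a single state-dict entry from its old key to its new key in place.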
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Any:
__UpperCAmelCase : Union[str, Any] = dct.pop(snake_case__ )
__UpperCAmelCase : Optional[int] = val
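# Main entry point: download the original checkpoint, remap its keys, load it into
# the HF model, and sanity-check the output logits before saving or pushing.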
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Union[str, Any]:
__UpperCAmelCase : Dict = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
__UpperCAmelCase : Union[str, Any] = model_name_to_url[model_name]
__UpperCAmelCase : str = torch.hub.load_state_dict_from_url(snake_case__, map_location="cpu" )["state_dict"]
__UpperCAmelCase : Dict = get_upernet_config(snake_case__ )
__UpperCAmelCase : str = UperNetForSemanticSegmentation(snake_case__ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
        __UpperCAmelCase : str = state_dict.pop(key )
if "bn" in key:
__UpperCAmelCase : int = key.replace("bn", "batch_norm" )
__UpperCAmelCase : Union[str, Any] = val
# rename keys
__UpperCAmelCase : Optional[Any] = create_rename_keys(snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__, snake_case__, snake_case__ )
model.load_state_dict(snake_case__ )
# verify on image
__UpperCAmelCase : int = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
__UpperCAmelCase : Optional[int] = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw ).convert("RGB" )
__UpperCAmelCase : str = SegformerImageProcessor()
__UpperCAmelCase : Any = processor(snake_case__, return_tensors="pt" ).pixel_values
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(snake_case__ )
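    # Expected top-left 3x3 slice of the channel-0 logits, taken from the original models.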
if model_name == "upernet-convnext-tiny":
__UpperCAmelCase : Any = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
__UpperCAmelCase : Optional[Any] = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
__UpperCAmelCase : Dict = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
__UpperCAmelCase : Tuple = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
__UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print("Logits:", outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3], snake_case__, atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case__ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(snake_case__ )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[F'upernet-convnext-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_snake_case = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
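    # Example invocation (hypothetical script and output paths):
    #   python convert_upernet_checkpoint.py --model_name upernet-convnext-tiny \
    #       --pytorch_dump_folder_path ./upernet-convnext-tiny --push_to_hub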
| 342 | 0 |
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_snake_case = threading.Lock()
_snake_case = None
_snake_case = {
'''debug''': logging.DEBUG,
'''info''': logging.INFO,
'''warning''': logging.WARNING,
'''error''': logging.ERROR,
'''critical''': logging.CRITICAL,
}
_snake_case = logging.WARNING
_snake_case = True
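# Module-level state: the shared lock, the lazily created default handler, the
# name -> level map, the fallback level, and whether tqdm progress bars are active.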
def _UpperCamelCase ( ) -> Dict:
__UpperCAmelCase : List[str] = os.getenv("TRANSFORMERS_VERBOSITY", __SCREAMING_SNAKE_CASE )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f'''Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '''
f'''has to be one of: { ', '.join(log_levels.keys() ) }''' )
return _default_log_level
def _UpperCamelCase ( ) -> str:
return __name__.split("." )[0]
def _UpperCamelCase ( ) -> logging.Logger:
return logging.getLogger(_get_library_name() )
def _UpperCamelCase ( ) -> None:
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
__UpperCAmelCase : str = logging.StreamHandler() # Set sys.stderr as stream.
__UpperCAmelCase : Any = sys.stderr.flush
# Apply our default configuration to the library root logger.
__UpperCAmelCase : List[str] = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
__UpperCAmelCase : List[Any] = False
def _UpperCamelCase ( ) -> None:
global _default_handler
with _lock:
if not _default_handler:
return
__UpperCAmelCase : Dict = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
__UpperCAmelCase : str = None
def _UpperCamelCase ( ) -> int:
return log_levels
def _UpperCamelCase ( snake_case__ = None ) -> logging.Logger:
if name is None:
__UpperCAmelCase : Tuple = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(__SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( ) -> int:
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def _UpperCamelCase ( snake_case__ ) -> None:
_configure_library_root_logger()
_get_library_root_logger().setLevel(__SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( ) -> Any:
return set_verbosity(__SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( ) -> Dict:
return set_verbosity(__SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( ) -> str:
return set_verbosity(__SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( ) -> Union[str, Any]:
return set_verbosity(__SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( ) -> None:
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def _UpperCamelCase ( ) -> None:
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def _UpperCamelCase ( snake_case__ ) -> None:
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(__SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( snake_case__ ) -> None:
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(__SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( ) -> None:
_configure_library_root_logger()
__UpperCAmelCase : Optional[Any] = False
def _UpperCamelCase ( ) -> None:
_configure_library_root_logger()
__UpperCAmelCase : Union[str, Any] = True
def _UpperCamelCase ( ) -> None:
__UpperCAmelCase : Any = _get_library_root_logger().handlers
for handler in handlers:
__UpperCAmelCase : str = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" )
handler.setFormatter(__SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( ) -> None:
__UpperCAmelCase : List[str] = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(__SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( self, *snake_case__, **snake_case__ ) -> Any:
__UpperCAmelCase : Any = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", __SCREAMING_SNAKE_CASE )
if no_advisory_warnings:
return
self.warning(*__SCREAMING_SNAKE_CASE, **__SCREAMING_SNAKE_CASE )
_snake_case = warning_advice
@functools.lru_cache(__SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( self, *snake_case__, **snake_case__ ) -> List[str]:
self.warning(*__SCREAMING_SNAKE_CASE, **__SCREAMING_SNAKE_CASE )
_snake_case = warning_once
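# No-op stand-in for tqdm, returned when progress bars have been disabled.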
class _snake_case :
def __init__( self: Optional[int] , *__lowerCamelCase: Tuple , **__lowerCamelCase: str ) -> Union[str, Any]: # pylint: disable=unused-argument
__UpperCAmelCase : Dict = args[0] if args else None
def __iter__( self: Optional[Any] ) -> int:
return iter(self._iterator )
def __getattr__( self: Tuple , __lowerCamelCase: Tuple ) -> int:
def empty_fn(*__lowerCamelCase: str , **__lowerCamelCase: List[Any] ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self: Dict ) -> List[str]:
return self
def __exit__( self: List[str] , __lowerCamelCase: Any , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Dict ) -> str:
return
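# Dispatcher: yields a real tqdm bar or the no-op version depending on the
# module-level progress-bar flag.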
class _snake_case :
def __call__( self: List[str] , *__lowerCamelCase: List[Any] , **__lowerCamelCase: Union[str, Any] ) -> str:
if _tqdm_active:
return tqdm_lib.tqdm(*UpperCamelCase__ , **UpperCamelCase__ )
else:
return EmptyTqdm(*UpperCamelCase__ , **UpperCamelCase__ )
def _lowerCamelCase ( self: Any , *__lowerCamelCase: Optional[int] , **__lowerCamelCase: List[Any] ) -> List[str]:
__UpperCAmelCase : Union[str, Any] = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*UpperCamelCase__ , **UpperCamelCase__ )
def _lowerCamelCase ( self: Tuple ) -> List[Any]:
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
_snake_case = _tqdm_cls()
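# Helpers to query and toggle progress bars globally; toggling also syncs the
# huggingface_hub progress-bar setting.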
def _UpperCamelCase ( ) -> bool:
global _tqdm_active
return bool(_tqdm_active )
def _UpperCamelCase ( ) -> Dict:
global _tqdm_active
__UpperCAmelCase : Union[str, Any] = True
hf_hub_utils.enable_progress_bars()
def _UpperCamelCase ( ) -> int:
global _tqdm_active
__UpperCAmelCase : str = False
hf_hub_utils.disable_progress_bars()
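# In the upstream transformers library these helpers carry descriptive names
# (get_logger, set_verbosity, enable_progress_bar, ...); typical usage there is:
#   logger = get_logger(__name__)
#   set_verbosity(INFO)
#   logger.info("message")
# The identifiers above are obfuscated, so the calls are illustrative only.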
| 370 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
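# RoCBert extends the standard BERT configuration with pronunciation and shape
# embeddings, used by the robust Chinese BERT model to resist character substitutions.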
class _snake_case ( _lowercase ):
lowerCamelCase__: Dict = "roc_bert"
def __init__( self: int , __lowerCamelCase: Union[str, Any]=3_05_22 , __lowerCamelCase: int=7_68 , __lowerCamelCase: Any=12 , __lowerCamelCase: int=12 , __lowerCamelCase: Union[str, Any]=30_72 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: Optional[int]=0.1 , __lowerCamelCase: str=0.1 , __lowerCamelCase: Any=5_12 , __lowerCamelCase: Union[str, Any]=2 , __lowerCamelCase: str=0.02 , __lowerCamelCase: int=1e-12 , __lowerCamelCase: str=True , __lowerCamelCase: int=0 , __lowerCamelCase: List[str]="absolute" , __lowerCamelCase: List[Any]=None , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: Dict=7_68 , __lowerCamelCase: Optional[int]=9_10 , __lowerCamelCase: Union[str, Any]=5_12 , __lowerCamelCase: int=2_48_58 , __lowerCamelCase: Optional[int]=True , **__lowerCamelCase: Any , ) -> List[Any]:
__UpperCAmelCase : str = vocab_size
__UpperCAmelCase : Dict = max_position_embeddings
__UpperCAmelCase : Optional[Any] = hidden_size
__UpperCAmelCase : Optional[int] = num_hidden_layers
__UpperCAmelCase : Union[str, Any] = num_attention_heads
__UpperCAmelCase : List[str] = intermediate_size
__UpperCAmelCase : List[Any] = hidden_act
__UpperCAmelCase : List[str] = hidden_dropout_prob
__UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : Optional[Any] = type_vocab_size
__UpperCAmelCase : List[Any] = layer_norm_eps
__UpperCAmelCase : Optional[int] = use_cache
__UpperCAmelCase : Optional[Any] = enable_pronunciation
__UpperCAmelCase : Any = enable_shape
__UpperCAmelCase : Union[str, Any] = pronunciation_embed_dim
__UpperCAmelCase : Optional[Any] = pronunciation_vocab_size
__UpperCAmelCase : Optional[Any] = shape_embed_dim
__UpperCAmelCase : List[Any] = shape_vocab_size
__UpperCAmelCase : int = concat_input
__UpperCAmelCase : int = position_embedding_type
__UpperCAmelCase : Optional[int] = classifier_dropout
super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase )
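# Illustrative usage, assuming the class is exposed under its upstream name
# RoCBertConfig (the parameter names mirror the __init__ arguments above):
#   config = RoCBertConfig(vocab_size=30522, enable_pronunciation=True)
#   config.save_pretrained("./roc-bert-config")  # hypothetical output path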
| 342 | 0 |