| code | code_codestyle | style_context | style_context_codestyle | label |
| --- | --- | --- | --- | --- |
| stringlengths 81–54k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
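
# Illustrative sketch, not part of the original module: the optional-dependency
# pattern above in miniature. A probe records a name in an import registry only
# when the backend is installed; the expensive import itself is deferred until
# first attribute access by `_LazyModule`. The helper and registry names below
# are hypothetical.
def _sentencepiece_installed() -> bool:
    try:
        import sentencepiece  # noqa: F401
    except ImportError:
        return False
    return True


_sketch_registry: dict = {}
if _sentencepiece_installed():
    _sketch_registry["tokenization_mluke"] = ["MLukeTokenizer"]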
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # prune directories we do not want to descend into (scripts, hidden, dunder)
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
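
# Example of the markdown this script prints for a hypothetical tree containing
# a top-level `sorts/` package with `bubble_sort.py`:
#
#   ## Sorts
#     * [Bubble Sort](sorts/bubble_sort.py)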
import unittest

import numpy as np
import timeout_decorator  # noqa

from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor


if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp

    from transformers import BlenderbotTokenizer
    from transformers.models.blenderbot.modeling_flax_blenderbot import (
        FlaxBlenderbotForConditionalGeneration,
        FlaxBlenderbotModel,
        shift_tokens_right,
    )
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
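
# Note on the helper above: np.where(input_ids != config.pad_token_id, 1, 0)
# builds a 0/1 attention mask of the same shape as the ids; with pad_token_id=1,
# [[5, 7, 1, 1]] becomes [[1, 1, 0, 0]].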
class FlaxBlenderbotModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False)
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1)
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1)
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0)
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48)
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
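
    # Worked example of the behaviour asserted above (assuming pad_token_id=1
    # and decoder_start_token_id=2): shifting [[71, 82, 18, 33, 2, 1, 1]] right
    # yields [[2, 71, 82, 18, 33, 2, 1]] -- the start token lands in column 0
    # and exactly one trailing pad token is consumed.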
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS).sequences
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_ids, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import logging
import os

import datasets
import tensorflow as tf

from transformers import AutoTokenizer


logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name", type=str, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets."
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path", type=str, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier."
    )
    parser.add_argument(
        "--shard_size", type=int, default=1000, help="Number of entries to go in a single shard."
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit", default=None, type=int, help="Limit the number of shards (used for debugging)."
    )
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum sequence length that is a multiple of 8."
    )
    parser.add_argument(
        "--output_dir", default="tf-tpu", type=str, help="Output directory where the TFRecord shards will be saved. If the path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord shards will be directly saved to a Google Cloud Storage bucket."
    )
    args = parser.parse_args()
    return args
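
# Hypothetical invocation (the script filename is illustrative; the flags are the
# ones defined above):
#
#   python prepare_tfrecord_shards.py \
#       --dataset_name wikitext --dataset_config wikitext-103-raw-v1 \
#       --split train --max_length 512 --shard_size 1000 --output_dir tf-tpu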
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records


def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result
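
    # Worked example of the chunking above: with max_length=512 and a batch of
    # 1300 concatenated tokens, total_length is floored to (1300 // 512) * 512
    # = 1024, producing two 512-token samples; the trailing 276 tokens are dropped.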
    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)


if __name__ == "__main__":
    args = parse_args()
    main(args)
import unittest

import numpy as np

from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roformer.modeling_flax_roformer import (
        FlaxRoFormerForMaskedLM,
        FlaxRoFormerForMultipleChoice,
        FlaxRoFormerForQuestionAnswering,
        FlaxRoFormerForSequenceClassification,
        FlaxRoFormerForTokenClassification,
        FlaxRoFormerModel,
    )


class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50_000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array([[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]])
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_A : Optional[int] ="""\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
_A : str ="""\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (NeurIPS, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
_A : str ="""
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large'. Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500, featurize_model_name="gpt2-large", device_id=-1, max_text_length=1024, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25):
        out = compute_mauve(p_text=predictions, q_text=references, p_features=p_features, q_features=q_features, p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data, kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo, kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id, max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size, mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed)
        return out
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np
import torch

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]

    def __init__(self, feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10, fft_window_size=1024, padding_value=0.0, return_attention_mask=False, frequency_min: float = 0, frequency_max: float = 14_000, top_db: int = None, truncation: str = "fusion", padding: str = "repeatpad", **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs)
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale="htk")
        self.mel_filters_slaney = mel_filter_bank(num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney")

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(waveform, window_function(self.fft_window_size, "hann"), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel="dB")
        return log_mel_spectrogram.T
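
    # Note on `self.nb_frequency_bins = (fft_window_size >> 1) + 1` above: this
    # is the number of non-redundant rFFT bins of a real-valued frame, i.e. a
    # 1024-point window yields 1024 // 2 + 1 = 513 frequency bins.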
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False)
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer
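
    # Summary of the policy implemented above (as read from the code, not from
    # official documentation):
    #   longer than max_length,  truncation="rand_trunc": random crop, slaney mel, shape (1, T, 64)
    #   longer than max_length,  truncation="fusion":     global downsample + 3 random chunks, shape (4, T, 64)
    #   shorter than max_length, padding="repeat"/"repeatpad": tile the waveform, then zero-pad to max_length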
    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], truncation: str = None, padding: Optional[str] = None, max_length: Optional[int] = None, sampling_rate: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
from unittest import TestCase

from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset


class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json


def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by the given validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by adding"
            " to this function."
        )

    checkpoint_callback = ModelCheckpoint(dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=1, every_n_epochs=1)
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True)
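
# Hypothetical wiring of the two factories above into a PyTorch Lightning
# trainer (argument values illustrative):
#
#   checkpoint_cb = get_checkpoint_callback("out", metric="rouge2")
#   es_cb = get_early_stopping_callback(metric="rouge2", patience=3)
#   trainer = pl.Trainer(callbacks=[checkpoint_cb, es_cb, Seq2SeqLoggingCallback()])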
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(self, vocab_size=29_056, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
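
# Minimal usage sketch (values illustrative): a small configuration for quick
# experiments, assuming the transformers package layout above.
#
#   config = MegatronBertConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=4)
#   assert config.model_type == "megatron-bert"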
from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
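
# Worked example (not from the original file): for [10, 22, 9, 33, 21, 50, 41,
# 60, 80] the longest non-decreasing subsequence is [10, 22, 33, 41, 60, 80],
# which is what this function is expected to return.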
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
_A : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
_A : List[Any] =parser.parse_args()
main(args)
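
# Example invocation (editor's sketch; the script filename and model directory
# are placeholders, the flags are the ones defined by the parser above):
#   python bertarize.py \
#       --pruning_method sigmoied_threshold \
#       --threshold 0.1 \
#       --model_name_or_path ./serialization_dir/fine_pruned_model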
| 4 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = 42
@flax_register_to_config
class lowerCamelCase__ ( nn.Module , A , A ):
'''simple docstring'''
A_ = 32
A_ = 4
A_ = 4
A_ = (
"""CrossAttnDownBlock2D""",
"""CrossAttnDownBlock2D""",
"""CrossAttnDownBlock2D""",
"""DownBlock2D""",
)
A_ = ("""UpBlock2D""", """CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""")
A_ = False
A_ = (320, 640, 1280, 1280)
A_ = 2
A_ = 8
A_ = None
A_ = 1280
A_ = 0.0
A_ = False
A_ = jnp.floataa
A_ = True
A_ = 0
A_ = False
def __UpperCAmelCase ( self : int , UpperCamelCase_ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
_lowercase : Optional[Any] = (1, self.in_channels, self.sample_size, self.sample_size)
_lowercase : Tuple = jnp.zeros(__lowerCAmelCase , dtype=jnp.floataa )
_lowercase : int = jnp.ones((1,) , dtype=jnp.intaa )
_lowercase : Union[str, Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
_lowercase , _lowercase : int = jax.random.split(__lowerCAmelCase )
_lowercase : Any = {'params': params_rng, 'dropout': dropout_rng}
return self.init(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )["params"]
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
'''simple docstring'''
_lowercase : Optional[int] = self.block_out_channels
_lowercase : Union[str, Any] = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_lowercase : Dict = self.num_attention_heads or self.attention_head_dim
# input
_lowercase : List[str] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
_lowercase : Optional[int] = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
_lowercase : List[str] = FlaxTimestepEmbedding(__lowerCAmelCase , dtype=self.dtype )
_lowercase : List[str] = self.only_cross_attention
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_lowercase : str = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_lowercase : List[Any] = (num_attention_heads,) * len(self.down_block_types )
# down
_lowercase : Union[str, Any] = []
_lowercase : Union[str, Any] = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
_lowercase : Optional[int] = output_channel
_lowercase : Tuple = block_out_channels[i]
_lowercase : List[str] = i == len(__lowerCAmelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
_lowercase : Any = FlaxCrossAttnDownBlockaD(
in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
_lowercase : Optional[Any] = FlaxDownBlockaD(
in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__lowerCAmelCase )
_lowercase : Optional[int] = down_blocks
# mid
_lowercase : Optional[int] = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
_lowercase : int = []
_lowercase : List[Any] = list(reversed(__lowerCAmelCase ) )
_lowercase : int = list(reversed(__lowerCAmelCase ) )
_lowercase : Dict = list(reversed(__lowerCAmelCase ) )
_lowercase : Optional[int] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
_lowercase : Dict = output_channel
_lowercase : Optional[int] = reversed_block_out_channels[i]
_lowercase : List[Any] = reversed_block_out_channels[min(i + 1 , len(__lowerCAmelCase ) - 1 )]
_lowercase : str = i == len(__lowerCAmelCase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
_lowercase : List[Any] = FlaxCrossAttnUpBlockaD(
in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , prev_output_channel=__lowerCAmelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
_lowercase : List[str] = FlaxUpBlockaD(
in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , prev_output_channel=__lowerCAmelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(__lowerCAmelCase )
_lowercase : Union[str, Any] = output_channel
_lowercase : Union[str, Any] = up_blocks
# out
_lowercase : List[str] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
_lowercase : Tuple = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Union[str, Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Tuple = True , UpperCamelCase_ : int = False , ) -> int:
'''simple docstring'''
if not isinstance(__lowerCAmelCase , jnp.ndarray ):
_lowercase : List[str] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__lowerCAmelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
_lowercase : Union[str, Any] = timesteps.astype(dtype=jnp.floataa )
_lowercase : Dict = jnp.expand_dims(__lowerCAmelCase , 0 )
_lowercase : Union[str, Any] = self.time_proj(__lowerCAmelCase )
_lowercase : Union[str, Any] = self.time_embedding(__lowerCAmelCase )
# 2. pre-process
_lowercase : Union[str, Any] = jnp.transpose(__lowerCAmelCase , (0, 2, 3, 1) )
_lowercase : List[Any] = self.conv_in(__lowerCAmelCase )
# 3. down
_lowercase : Any = (sample,)
for down_block in self.down_blocks:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_lowercase , _lowercase : Optional[int] = down_block(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , deterministic=not train )
else:
_lowercase , _lowercase : Optional[Any] = down_block(__lowerCAmelCase , __lowerCAmelCase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
_lowercase : Optional[Any] = ()
for down_block_res_sample, down_block_additional_residual in zip(
__lowerCAmelCase , __lowerCAmelCase ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
_lowercase : Dict = new_down_block_res_samples
# 4. mid
_lowercase : List[Any] = self.mid_block(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
_lowercase : Optional[Any] = down_block_res_samples[-(self.layers_per_block + 1) :]
_lowercase : Tuple = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_lowercase : str = up_block(
__lowerCAmelCase , temb=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , res_hidden_states_tuple=__lowerCAmelCase , deterministic=not train , )
else:
_lowercase : Optional[Any] = up_block(__lowerCAmelCase , temb=__lowerCAmelCase , res_hidden_states_tuple=__lowerCAmelCase , deterministic=not train )
# 6. post-process
_lowercase : Optional[Any] = self.conv_norm_out(__lowerCAmelCase )
_lowercase : int = nn.silu(__lowerCAmelCase )
_lowercase : str = self.conv_out(__lowerCAmelCase )
_lowercase : Tuple = jnp.transpose(__lowerCAmelCase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=__lowerCAmelCase )
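
# Illustrative shape walk-through (editor's sketch, kept as comments; the class
# name `FlaxUNet2DConditionModel` and the exact call signature are assumptions
# about the de-obfuscated upstream API):
#   unet = FlaxUNet2DConditionModel(sample_size=32)
#   params = unet.init_weights(jax.random.PRNGKey(0))          # see the init method above
#   sample = jnp.zeros((1, 4, 32, 32))                          # (batch, channels, height, width)
#   timesteps = jnp.ones((1,), dtype=jnp.int32)
#   encoder_hidden_states = jnp.zeros((1, 1, 1280))             # cross-attention context
#   out = unet.apply({"params": params}, sample, timesteps, encoder_hidden_states)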
| 706 |
'''simple docstring'''
_A : Optional[Any] ='''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def __UpperCamelCase ( _lowercase ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(_lowercase, _lowercase ):
_lowercase : Union[str, Any] = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(_lowercase )
    _lowercase : int = ''.join(bin(byte )[2:].zfill(8 ) for byte in data )
    _lowercase : Dict = len(binary_stream ) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        _lowercase : Optional[Any] = B'=' * ((6 - len(binary_stream ) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream ) % 6)
else:
_lowercase : Optional[int] = B''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6], 2 )]
            for index in range(0, len(binary_stream ), 6 ) ).encode()
+ padding
)
def __UpperCamelCase ( _lowercase ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(_lowercase, _lowercase ) and not isinstance(_lowercase, _lowercase ):
_lowercase : int = (
'argument should be a bytes-like object or ASCII string, '
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(_lowercase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_lowercase, _lowercase ):
try:
_lowercase : Optional[int] = encoded_data.decode('utf-8' )
except UnicodeDecodeError:
raise ValueError('base64 encoded data should only contain ASCII characters' )
_lowercase : Optional[int] = encoded_data.count('=' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_lowercase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
_lowercase : str = encoded_data[:-padding]
        _lowercase : Tuple = ''.join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
    else:
        _lowercase : Union[str, Any] = ''.join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )
_lowercase : List[str] = [
int(binary_stream[index : index + 8], 2 )
        for index in range(0, len(binary_stream ), 8 )
]
return bytes(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
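
# Editor's cross-check (self-contained): the two functions above mirror the
# standard library's Base64, so the expected behavior can be verified against it.
import base64
assert base64.b64encode(b"Hello World!") == b"SGVsbG8gV29ybGQh"
assert base64.b64decode(b"SGVsbG8gV29ybGQh") == b"Hello World!"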
| 4 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
_A : Union[str, Any] ="2020.9.26"
_A : Any ="xcodz-dot, cclaus, dhruvmanila"
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) -> tuple[float, float]:
    if not all(isinstance(val, (float, int) ) for val in locals().values() ):
_lowercase : str = f'''Input values must either be float or int: {list(locals().values() )}'''
raise TypeError(UpperCAmelCase__ )
_lowercase : str = ((x * distance) / (z + distance)) * scale
_lowercase : Optional[Any] = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) -> tuple[float, float, float]:
if not isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
raise TypeError('Axis must be a str' )
_lowercase : int = locals()
del input_variables["axis"]
    if not all(isinstance(val, (float, int) ) for val in input_variables.values() ):
_lowercase : List[Any] = (
"""Input values except axis must either be float or int: """
f'''{list(input_variables.values() )}'''
)
raise TypeError(UpperCAmelCase__ )
_lowercase : Optional[Any] = (angle % 360) / 450 * 180 / math.pi
if axis == "z":
_lowercase : Union[str, Any] = x * math.cos(UpperCAmelCase__ ) - y * math.sin(UpperCAmelCase__ )
_lowercase : Optional[Any] = y * math.cos(UpperCAmelCase__ ) + x * math.sin(UpperCAmelCase__ )
_lowercase : Dict = z
elif axis == "x":
_lowercase : Dict = y * math.cos(UpperCAmelCase__ ) - z * math.sin(UpperCAmelCase__ )
_lowercase : int = z * math.cos(UpperCAmelCase__ ) + y * math.sin(UpperCAmelCase__ )
_lowercase : str = x
elif axis == "y":
_lowercase : str = x * math.cos(UpperCAmelCase__ ) - z * math.sin(UpperCAmelCase__ )
_lowercase : Any = z * math.cos(UpperCAmelCase__ ) + x * math.sin(UpperCAmelCase__ )
_lowercase : Optional[Any] = y
else:
raise ValueError('not a valid axis, choose one of \'x\', \'y\', \'z\'' )
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{convert_to_ad(1.0, 2.0, 3.0, 1_0.0, 1_0.0) = }''')
print(F'''{rotate(1.0, 2.0, 3.0, 'y', 9_0.0) = }''')
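
# Editor's cross-check (standard convention, independent of the helpers above,
# which use their own non-standard angle conversion): rotating the point (1, 0)
# by 90 degrees about the z-axis gives (0, 1), using
#   x' = x*cos(t) - y*sin(t),  y' = y*cos(t) + x*sin(t)
_t = math.pi / 2
_x, _y = 1.0, 0.0
assert abs(_x * math.cos(_t) - _y * math.sin(_t)) < 1e-9        # x' -> 0
assert abs(_y * math.cos(_t) + _x * math.sin(_t) - 1.0) < 1e-9  # y' -> 1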
| 707 |
'''simple docstring'''
def __UpperCamelCase ( _lowercase ) -> bool:
return str(_lowercase ) == str(_lowercase )[::-1]
def __UpperCamelCase ( _lowercase ) -> int:
return int(_lowercase ) + int(str(_lowercase )[::-1] )
def __UpperCamelCase ( _lowercase = 1_0000 ) -> int:
_lowercase : List[str] = []
for num in range(1, _lowercase ):
_lowercase : Tuple = 0
_lowercase : Tuple = num
while iterations < 50:
_lowercase : Union[str, Any] = sum_reverse(_lowercase )
iterations += 1
if is_palindrome(_lowercase ):
break
else:
lychrel_nums.append(_lowercase )
return len(_lowercase )
if __name__ == "__main__":
print(F'''{solution() = }''')
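
# Editor's cross-check (self-contained): 196 is the smallest Lychrel candidate,
# so the reverse-and-add process above never reaches a palindrome for it within
# the 50-iteration budget.
_n = 196
for _ in range(50):
    _n += int(str(_n)[::-1])
    if str(_n) == str(_n)[::-1]:
        break
else:
    print('196 did not reach a palindrome within 50 iterations')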
| 4 | 0 |
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> str:
_lowercase : Union[str, Any] = 1.5
_lowercase : List[str] = int(factor * num_class_images )
_lowercase : Optional[Any] = ClipClient(
url='https://knn.laion.ai/knn-service', indice_name='laion_400m', num_images=_lowercase, aesthetic_weight=0.1 )
os.makedirs(f'''{class_data_dir}/images''', exist_ok=_lowercase )
if len(list(Path(f'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
return
while True:
_lowercase : List[Any] = client.query(text=_lowercase )
if len(_lowercase ) >= factor * num_class_images or num_images > 1E4:
break
else:
_lowercase : Optional[int] = int(factor * num_images )
_lowercase : str = ClipClient(
url='https://knn.laion.ai/knn-service', indice_name='laion_400m', num_images=_lowercase, aesthetic_weight=0.1, )
_lowercase : Optional[int] = 0
_lowercase : str = 0
_lowercase : Any = tqdm(desc='downloading real regularization images', total=_lowercase )
with open(f'''{class_data_dir}/caption.txt''', 'w' ) as fa, open(f'''{class_data_dir}/urls.txt''', 'w' ) as fa, open(
f'''{class_data_dir}/images.txt''', 'w' ) as fa:
while total < num_class_images:
_lowercase : Optional[Any] = class_images[count]
count += 1
try:
_lowercase : str = requests.get(images['url'] )
if img.status_code == 200:
_lowercase : int = Image.open(BytesIO(img.content ) )
with open(f'''{class_data_dir}/images/{total}.jpg''', 'wb' ) as f:
f.write(img.content )
fa.write(images['caption'] + '\n' )
fa.write(images['url'] + '\n' )
fa.write(f'''{class_data_dir}/images/{total}.jpg''' + '\n' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def __UpperCamelCase ( ) -> Dict:
_lowercase : Optional[int] = argparse.ArgumentParser('', add_help=_lowercase )
parser.add_argument('--class_prompt', help='text prompt to retrieve images', required=_lowercase, type=_lowercase )
parser.add_argument('--class_data_dir', help='path to save images', required=_lowercase, type=_lowercase )
parser.add_argument('--num_class_images', help='number of images to download', default=200, type=_lowercase )
return parser.parse_args()
if __name__ == "__main__":
_A : List[Any] =parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
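
# Example invocation (editor's sketch; the prompt and paths are placeholders,
# the flags are the ones defined by `parse_args` above):
#   python retrieve.py --class_prompt "photo of a dog" \
#       --class_data_dir ./real_reg/samples_dog --num_class_images 200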
| 708 |
'''simple docstring'''
import argparse
from collections import defaultdict
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) -> int:
_lowercase : Optional[int] = f'''{file}_{class_name}_{test_name}'''
done_test[_id] += 1
with open(_lowercase, 'r' ) as f:
_lowercase : Optional[int] = f.readlines()
_lowercase : Dict = f'''class {class_name}('''
_lowercase : List[Any] = f'''{4 * " "}def {test_name}('''
_lowercase : List[str] = f'''{8 * " "}{correct_line.split()[0]}'''
_lowercase : List[str] = f'''{16 * " "}{correct_line.split()[0]}'''
_lowercase : Dict = False
_lowercase : str = False
_lowercase : List[Any] = False
_lowercase : Union[str, Any] = False
_lowercase : Any = 0
_lowercase : Tuple = 0
_lowercase : Optional[int] = []
for line in lines:
if line.startswith(_lowercase ):
_lowercase : int = True
elif in_class and line.startswith(_lowercase ):
_lowercase : List[Any] = True
elif in_class and in_func and (line.startswith(_lowercase ) or line.startswith(_lowercase )):
_lowercase : str = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_lowercase : List[Any] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_lowercase : Any = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f'''{spaces * " "}{correct_line}''' )
_lowercase : Any = False
else:
new_lines.append(_lowercase )
with open(_lowercase, 'w' ) as f:
for line in new_lines:
f.write(_lowercase )
def __UpperCamelCase ( _lowercase, _lowercase=None ) -> Optional[Any]:
if fail is not None:
with open(_lowercase, 'r' ) as f:
_lowercase : Any = {l.strip() for l in f.readlines()}
else:
_lowercase : str = None
with open(_lowercase, 'r' ) as f:
_lowercase : str = f.readlines()
_lowercase : Union[str, Any] = defaultdict(_lowercase )
for line in correct_lines:
_lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = line.split(';' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(_lowercase, _lowercase, _lowercase, _lowercase, _lowercase )
if __name__ == "__main__":
_A : str =argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
_A : Union[str, Any] =parser.parse_args()
main(args.correct_filename, args.fail_filename)
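
# Editor's note on the expected input (inferred from the `line.split(';')`
# unpacking above): each line of --correct_filename carries four `;`-separated
# fields, i.e.
#   <file>;<class name>;<test name>;<correct line>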
| 4 | 0 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def __UpperCamelCase ( _lowercase ) -> str:
if (
(cp >= 0x4E_00 and cp <= 0x9F_FF)
or (cp >= 0x34_00 and cp <= 0x4D_BF) #
or (cp >= 0x2_00_00 and cp <= 0x2_A6_DF) #
or (cp >= 0x2_A7_00 and cp <= 0x2_B7_3F) #
or (cp >= 0x2_B7_40 and cp <= 0x2_B8_1F) #
or (cp >= 0x2_B8_20 and cp <= 0x2_CE_AF) #
or (cp >= 0xF9_00 and cp <= 0xFA_FF)
or (cp >= 0x2_F8_00 and cp <= 0x2_FA_1F) #
): #
return True
return False
def __UpperCamelCase ( _lowercase ) -> int:
for char in word:
_lowercase : List[Any] = ord(UpperCAmelCase__ )
if not _is_chinese_char(UpperCAmelCase__ ):
return 0
return 1
def __UpperCamelCase ( _lowercase ) -> Union[str, Any]:
_lowercase : Union[str, Any] = set()
for token in tokens:
_lowercase : Tuple = len(UpperCAmelCase__ ) > 1 and is_chinese(UpperCAmelCase__ )
if chinese_word:
word_set.add(UpperCAmelCase__ )
_lowercase : Tuple = list(UpperCAmelCase__ )
return word_list
def __UpperCamelCase ( _lowercase, _lowercase ) -> Optional[int]:
if not chinese_word_set:
return bert_tokens
_lowercase : List[Any] = max([len(UpperCAmelCase__ ) for w in chinese_word_set] )
_lowercase : List[Any] = bert_tokens
_lowercase , _lowercase : List[Any] = 0, len(UpperCAmelCase__ )
while start < end:
_lowercase : List[str] = True
if is_chinese(bert_word[start] ):
_lowercase : str = min(end - start, UpperCAmelCase__ )
for i in range(UpperCAmelCase__, 1, -1 ):
_lowercase : str = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1, start + i ):
_lowercase : Dict = '##' + bert_word[j]
_lowercase : Optional[int] = start + i
_lowercase : List[str] = False
break
if single_word:
start += 1
return bert_word
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> List[str]:
_lowercase : Tuple = []
for i in range(0, len(UpperCAmelCase__ ), 100 ):
_lowercase : int = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=['cws'] ).cws
_lowercase : Optional[Any] = [get_chinese_word(UpperCAmelCase__ ) for r in res]
ltp_res.extend(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
_lowercase : List[str] = []
for i in range(0, len(UpperCAmelCase__ ), 100 ):
_lowercase : List[str] = bert_tokenizer(lines[i : i + 100], add_special_tokens=UpperCAmelCase__, truncation=UpperCAmelCase__, max_length=512 )
bert_res.extend(res['input_ids'] )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
_lowercase : Tuple = []
for input_ids, chinese_word in zip(UpperCAmelCase__, UpperCAmelCase__ ):
_lowercase : Dict = []
for id in input_ids:
            _lowercase : List[Any] = bert_tokenizer._convert_id_to_token(id )
input_tokens.append(UpperCAmelCase__ )
_lowercase : Any = add_sub_symbol(UpperCAmelCase__, UpperCAmelCase__ )
_lowercase : str = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(UpperCAmelCase__ ):
if token[:2] == "##":
_lowercase : List[str] = token[2:]
# save chinese tokens' pos
if len(UpperCAmelCase__ ) == 1 and _is_chinese_char(ord(UpperCAmelCase__ ) ):
ref_id.append(UpperCAmelCase__ )
ref_ids.append(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
return ref_ids
def __UpperCamelCase ( _lowercase ) -> int:
with open(args.file_name, 'r', encoding='utf-8' ) as f:
_lowercase : Optional[int] = f.readlines()
    _lowercase : Tuple = [line.strip() for line in data if len(line ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
_lowercase : int = LTP(args.ltp ) # faster in GPU device
_lowercase : Dict = BertTokenizer.from_pretrained(args.bert )
_lowercase : Optional[int] = prepare_ref(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
with open(args.save_path, 'w', encoding='utf-8' ) as f:
_lowercase : List[str] = [json.dumps(UpperCAmelCase__ ) + '\n' for ref in ref_ids]
f.writelines(UpperCAmelCase__ )
if __name__ == "__main__":
_A : int =argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
_A : List[str] =parser.parse_args()
main(args)
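
# Editor's cross-check (self-contained): the first range tested by
# `_is_chinese_char` above is the CJK Unified Ideographs block U+4E00..U+9FFF,
# which contains ordinary characters such as "中" (U+4E2D).
assert 0x4E_00 <= ord('中') <= 0x9F_FF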
| 709 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_A : Optional[int] =logging.get_logger(__name__)
@add_end_docstrings(A )
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : Tuple , **UpperCamelCase_ : List[str] ) -> int:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : int , UpperCamelCase_ : Union[str, List[str], "Image", List["Image"]] , **UpperCamelCase_ : Tuple ) -> List[Any]:
'''simple docstring'''
return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] , **UpperCamelCase_ : str ) -> List[str]:
'''simple docstring'''
_lowercase : Optional[int] = {}
if "candidate_labels" in kwargs:
_lowercase : Union[str, Any] = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
_lowercase : int = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str="This is a photo of {}." ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Dict = load_image(UpperCamelCase_ )
_lowercase : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
_lowercase : Optional[Any] = candidate_labels
        _lowercase : List[Any] = [hypothesis_template.format(x ) for x in candidate_labels]
_lowercase : Union[str, Any] = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework , padding=UpperCamelCase_ )
_lowercase : Any = [text_inputs]
return inputs
def __UpperCAmelCase ( self : str , UpperCamelCase_ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = model_inputs.pop('candidate_labels' )
_lowercase : List[str] = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , UpperCamelCase_ ):
_lowercase : Optional[int] = text_inputs[0]
else:
# Batching case.
_lowercase : List[str] = text_inputs[0][0]
_lowercase : Optional[Any] = self.model(**UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Optional[Any] = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> List[str]:
'''simple docstring'''
_lowercase : Optional[int] = model_outputs.pop('candidate_labels' )
_lowercase : Optional[int] = model_outputs['logits'][0]
if self.framework == "pt":
_lowercase : List[Any] = logits.softmax(dim=-1 ).squeeze(-1 )
_lowercase : Tuple = probs.tolist()
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_lowercase : List[Any] = [scores]
elif self.framework == "tf":
_lowercase : Optional[int] = stable_softmax(UpperCamelCase_ , axis=-1 )
_lowercase : List[Any] = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
_lowercase : List[Any] = [
{'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(UpperCamelCase_ , UpperCamelCase_ ) , key=lambda x : -x[0] )
]
return result
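
# Illustrative usage (editor's sketch, kept as comments because it downloads a
# checkpoint; the task name and model id are the standard public ones):
#   from transformers import pipeline
#   classifier = pipeline('zero-shot-image-classification', model='openai/clip-vit-base-patch32')
#   preds = classifier(
#       'http://images.cocodataset.org/val2017/000000039769.jpg',
#       candidate_labels=['cat', 'remote', 'couch'],
#   )
#   # -> a list of {'score': ..., 'label': ...} dicts sorted by descending score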
| 4 | 0 |
'''simple docstring'''
from __future__ import annotations
def __UpperCamelCase ( _lowercase ) -> Optional[int]:
_lowercase : Union[str, Any] = str(_snake_case )
return n == n[::-1]
def __UpperCamelCase ( _lowercase = 100_0000 ) -> Any:
_lowercase : int = 0
for i in range(1, _snake_case ):
        if is_palindrome(i ) and is_palindrome(bin(i ).split('b' )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
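
# Editor's cross-check (self-contained): 585 is palindromic in both bases
# (585 = 0b1001001001), so it is one of the numbers summed by `solution` above.
assert str(585) == str(585)[::-1]
assert bin(585)[2:] == bin(585)[2:][::-1]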
| 710 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __UpperCamelCase ( _lowercase ) -> None:
_lowercase , _lowercase : List[Any] = analyze_text(_lowercase )
_lowercase : Any = list(' ' + ascii_lowercase )
# what is our total sum of probabilities.
_lowercase : Union[str, Any] = sum(single_char_strings.values() )
# one length string
_lowercase : Union[str, Any] = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
_lowercase : Any = single_char_strings[ch]
_lowercase : int = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
# two len string
_lowercase : str = sum(two_char_strings.values() )
_lowercase : str = 0
# for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            _lowercase : Optional[Any] = cha + chb
if sequence in two_char_strings:
_lowercase : int = two_char_strings[sequence]
_lowercase : Optional[int] = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def __UpperCamelCase ( _lowercase ) -> tuple[dict, dict]:
_lowercase : Optional[Any] = Counter() # type: ignore
_lowercase : List[Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __UpperCamelCase ( ) -> List[Any]:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
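
# Editor's worked example of the first-order entropy computed above: for text
# whose characters are uniform over four symbols, H = -sum(p * log2(p)) = 2 bits.
# (This uses the real `math.log2`, which the renaming above renders as `math.loga`.)
_probs = [0.25] * 4
_h = -sum(p * math.log2(p) for p in _probs)
assert abs(_h - 2.0) < 1e-9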
| 4 | 0 |
'''simple docstring'''
def __UpperCamelCase ( _lowercase ) -> Any: # noqa: E741
_lowercase : str = len(_lowercase )
_lowercase : Union[str, Any] = 0
_lowercase : Any = [0] * n
_lowercase : Dict = [False] * n
_lowercase : Optional[Any] = [False] * n
def dfs(_lowercase, _lowercase, _lowercase, _lowercase ):
if parent == root:
out_edge_count += 1
_lowercase : int = True
_lowercase : Optional[Any] = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
_lowercase : Optional[int] = dfs(_lowercase, _lowercase, _lowercase, _lowercase )
_lowercase : Dict = min(low[at], low[to] )
# AP found via bridge
if at < low[to]:
_lowercase : str = True
# AP found via cycle
if at == low[to]:
_lowercase : int = True
else:
_lowercase : List[str] = min(low[at], _lowercase )
return out_edge_count
    for i in range(n ):
if not visited[i]:
_lowercase : str = 0
_lowercase : Dict = dfs(_lowercase, _lowercase, -1, _lowercase )
_lowercase : str = out_edge_count > 1
    for x in range(len(is_art ) ):
        if is_art[x] is True:
            print(x )
# Adjacency list of graph
_A : str = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
| 711 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : List[Any] = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
_lowercase : str = AutoTokenizer.from_pretrained('xlm-roberta-base' )
_lowercase : List[Any] = 'The dog is cute and lives in the garden house'
_lowercase : Optional[int] = jnp.array([tokenizer.encode(UpperCamelCase_ )] )
_lowercase : int = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
_lowercase : Tuple = jnp.array(
[[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
_lowercase : List[str] = model(UpperCamelCase_ )['last_hidden_state']
self.assertEqual(output.shape , UpperCamelCase_ )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , UpperCamelCase_ , atol=1E-3 ) )
| 4 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class lowerCamelCase__ :
'''simple docstring'''
@staticmethod
def __UpperCAmelCase ( *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Optional[int] ) -> Tuple:
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
A_ = MODEL_FOR_OBJECT_DETECTION_MAPPING
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Tuple = ObjectDetectionPipeline(model=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __UpperCAmelCase ( self : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str] ) -> Any:
'''simple docstring'''
_lowercase : int = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png' , threshold=0.0 )
self.assertGreater(len(_SCREAMING_SNAKE_CASE ) , 0 )
for detected_object in outputs:
self.assertEqual(
_SCREAMING_SNAKE_CASE , {
'score': ANY(_SCREAMING_SNAKE_CASE ),
'label': ANY(_SCREAMING_SNAKE_CASE ),
'box': {'xmin': ANY(_SCREAMING_SNAKE_CASE ), 'ymin': ANY(_SCREAMING_SNAKE_CASE ), 'xmax': ANY(_SCREAMING_SNAKE_CASE ), 'ymax': ANY(_SCREAMING_SNAKE_CASE )},
} , )
import datasets
_lowercase : str = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
_lowercase : Any = [
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
_lowercase : Any = object_detector(_SCREAMING_SNAKE_CASE , threshold=0.0 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) )
for outputs in batch_outputs:
self.assertGreater(len(_SCREAMING_SNAKE_CASE ) , 0 )
for detected_object in outputs:
self.assertEqual(
_SCREAMING_SNAKE_CASE , {
'score': ANY(_SCREAMING_SNAKE_CASE ),
'label': ANY(_SCREAMING_SNAKE_CASE ),
'box': {'xmin': ANY(_SCREAMING_SNAKE_CASE ), 'ymin': ANY(_SCREAMING_SNAKE_CASE ), 'xmax': ANY(_SCREAMING_SNAKE_CASE ), 'ymax': ANY(_SCREAMING_SNAKE_CASE )},
} , )
@require_tf
@unittest.skip('Object detection not implemented in TF' )
def __UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
pass
@require_torch
def __UpperCAmelCase ( self : str ) -> List[str]:
'''simple docstring'''
_lowercase : List[str] = 'hf-internal-testing/tiny-detr-mobilenetsv3'
_lowercase : Dict = AutoModelForObjectDetection.from_pretrained(_SCREAMING_SNAKE_CASE )
_lowercase : Tuple = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
_lowercase : Any = ObjectDetectionPipeline(model=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE )
_lowercase : List[str] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=0.0 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.33_76, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.33_76, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
] , )
_lowercase : int = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{'score': 0.33_76, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.33_76, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
[
{'score': 0.33_76, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.33_76, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
] , )
@require_torch
@slow
def __UpperCAmelCase ( self : int ) -> List[str]:
'''simple docstring'''
_lowercase : Union[str, Any] = 'facebook/detr-resnet-50'
_lowercase : Optional[int] = AutoModelForObjectDetection.from_pretrained(_SCREAMING_SNAKE_CASE )
_lowercase : Optional[Any] = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
_lowercase : Union[str, Any] = ObjectDetectionPipeline(model=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE )
_lowercase : str = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.99_82, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.99_60, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.99_55, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.99_88, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.99_87, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
_lowercase : str = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{'score': 0.99_82, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.99_60, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.99_55, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.99_88, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.99_87, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.99_82, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.99_60, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.99_55, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.99_88, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.99_87, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
] , )
@require_torch
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Dict = 'facebook/detr-resnet-50'
_lowercase : List[str] = pipeline('object-detection' , model=_SCREAMING_SNAKE_CASE )
_lowercase : Dict = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.99_82, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.99_60, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.99_55, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.99_88, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.99_87, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
_lowercase : List[Any] = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{'score': 0.99_82, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.99_60, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.99_55, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.99_88, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.99_87, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.99_82, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.99_60, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.99_55, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.99_88, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.99_87, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
] , )
@require_torch
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
'''simple docstring'''
_lowercase : str = 0.99_85
_lowercase : List[str] = 'facebook/detr-resnet-50'
_lowercase : List[str] = pipeline('object-detection' , model=_SCREAMING_SNAKE_CASE )
_lowercase : List[Any] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=_SCREAMING_SNAKE_CASE )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.99_88, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.99_87, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __UpperCAmelCase ( self : Any ) -> Optional[int]:
'''simple docstring'''
_lowercase : int = 'Narsil/layoutlmv3-finetuned-funsd'
_lowercase : List[Any] = 0.99_93
_lowercase : Union[str, Any] = pipeline('object-detection' , model=_SCREAMING_SNAKE_CASE , threshold=_SCREAMING_SNAKE_CASE )
_lowercase : Optional[int] = object_detector(
            'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.99_93, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
{'score': 0.99_93, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
] , )
| 712 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_A : int =logging.get_logger(__name__)
_A : Union[str, Any] ={
    '''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """instructblip_vision_model"""
def __init__( self : Union[str, Any] , UpperCamelCase_ : str=1408 , UpperCamelCase_ : Tuple=6144 , UpperCamelCase_ : Union[str, Any]=39 , UpperCamelCase_ : Optional[Any]=16 , UpperCamelCase_ : str=224 , UpperCamelCase_ : Dict=14 , UpperCamelCase_ : Dict="gelu" , UpperCamelCase_ : int=1E-6 , UpperCamelCase_ : int=0.0 , UpperCamelCase_ : List[str]=1E-10 , UpperCamelCase_ : str=True , **UpperCamelCase_ : Dict , ) -> Any:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
_lowercase : Optional[Any] = hidden_size
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[int] = num_hidden_layers
_lowercase : str = num_attention_heads
_lowercase : Tuple = patch_size
_lowercase : Dict = image_size
_lowercase : Optional[int] = initializer_range
_lowercase : List[Any] = attention_dropout
_lowercase : int = layer_norm_eps
_lowercase : Optional[int] = hidden_act
_lowercase : str = qkv_bias
@classmethod
def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase_ )
_lowercase , _lowercase : Tuple = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
_lowercase : Any = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """instructblip_qformer"""
def __init__( self : Tuple , UpperCamelCase_ : Union[str, Any]=3_0522 , UpperCamelCase_ : Union[str, Any]=768 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Optional[Any]=12 , UpperCamelCase_ : List[str]=3072 , UpperCamelCase_ : List[str]="gelu" , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : List[Any]=1E-12 , UpperCamelCase_ : Optional[Any]=0 , UpperCamelCase_ : str="absolute" , UpperCamelCase_ : List[Any]=2 , UpperCamelCase_ : Any=1408 , **UpperCamelCase_ : Dict , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Dict = vocab_size
_lowercase : Optional[Any] = hidden_size
_lowercase : Any = num_hidden_layers
_lowercase : List[Any] = num_attention_heads
_lowercase : Optional[int] = hidden_act
_lowercase : Union[str, Any] = intermediate_size
_lowercase : List[Any] = hidden_dropout_prob
_lowercase : Dict = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : Optional[int] = initializer_range
_lowercase : Tuple = layer_norm_eps
_lowercase : List[str] = position_embedding_type
_lowercase : str = cross_attention_frequency
_lowercase : int = encoder_hidden_size
@classmethod
def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase_ )
_lowercase , _lowercase : List[str] = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
_lowercase : Optional[int] = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """instructblip"""
A_ = True
def __init__( self : Any , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=32 , **UpperCamelCase_ : int ) -> List[str]:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
if vision_config is None:
_lowercase : Any = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
_lowercase : List[Any] = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
_lowercase : List[Any] = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
_lowercase : List[Any] = InstructBlipVisionConfig(**UpperCamelCase_ )
_lowercase : Union[str, Any] = InstructBlipQFormerConfig(**UpperCamelCase_ )
_lowercase : Union[str, Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
_lowercase : int = CONFIG_MAPPING[text_model_type](**UpperCamelCase_ )
_lowercase : str = self.text_config.tie_word_embeddings
_lowercase : int = self.text_config.is_encoder_decoder
_lowercase : Tuple = num_query_tokens
_lowercase : str = self.vision_config.hidden_size
_lowercase : Union[str, Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowercase : List[Any] = 1.0
_lowercase : int = 0.02
@classmethod
def __UpperCAmelCase ( cls : Tuple , UpperCamelCase_ : InstructBlipVisionConfig , UpperCamelCase_ : InstructBlipQFormerConfig , UpperCamelCase_ : PretrainedConfig , **UpperCamelCase_ : Dict , ) -> List[str]:
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCamelCase_ , )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
'''simple docstring'''
_lowercase : List[Any] = copy.deepcopy(self.__dict__ )
_lowercase : Optional[int] = self.vision_config.to_dict()
_lowercase : Optional[Any] = self.qformer_config.to_dict()
_lowercase : Tuple = self.text_config.to_dict()
_lowercase : Dict = self.__class__.model_type
return output
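
# Illustrative composition (editor's sketch, kept as comments; the upstream
# class names InstructBlipVisionConfig / InstructBlipQFormerConfig /
# InstructBlipConfig are inferred from the references inside `__init__` above,
# and the classmethod name is likewise an assumption):
#   vision = InstructBlipVisionConfig()
#   qformer = InstructBlipQFormerConfig()
#   text = CONFIG_MAPPING['opt']()
#   config = InstructBlipConfig.from_vision_qformer_text_configs(vision, qformer, text)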
| 4 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_A : str ={'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : List[str] =[
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : int =[
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
_A : Tuple =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
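
# Editor's note: with this `_LazyModule` pattern, callers import the symbols as
# if they were defined eagerly, e.g.
#   from transformers.models.vit_mae import ViTMAEConfig, ViTMAEModel
# and the underlying submodule is only loaded on first attribute access.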
| 713 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_A : List[str] ='''pt'''
elif is_tf_available():
_A : Tuple ='''tf'''
else:
_A : Optional[int] ='''jax'''
class lowerCamelCase__ ( A , unittest.TestCase ):
'''simple docstring'''
A_ = ByTaTokenizer
A_ = False
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
_lowercase : Any = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def __UpperCAmelCase ( self : int , **UpperCamelCase_ : List[Any] ) -> ByTaTokenizer:
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Tuple=20 , UpperCamelCase_ : Optional[int]=5 ) -> Tuple[str, list]:
'''simple docstring'''
_lowercase : Dict = []
for i in range(len(UpperCamelCase_ ) ):
try:
_lowercase : List[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_lowercase : Optional[Any] = list(filter(lambda UpperCamelCase_ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , UpperCamelCase_ ) )
_lowercase : List[Any] = list(filter(lambda UpperCamelCase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase_ ) , UpperCamelCase_ ) )
if max_length is not None and len(UpperCamelCase_ ) > max_length:
_lowercase : List[Any] = toks[:max_length]
if min_length is not None and len(UpperCamelCase_ ) < min_length and len(UpperCamelCase_ ) > 0:
while len(UpperCamelCase_ ) < min_length:
_lowercase : Tuple = toks + toks
# toks_str = [t[1] for t in toks]
_lowercase : Dict = [t[0] for t in toks]
# Ensure consistency
_lowercase : Any = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
if " " not in output_txt and len(UpperCamelCase_ ) > 1:
_lowercase : Any = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase_ )
)
if with_prefix_space:
_lowercase : Union[str, Any] = ' ' + output_txt
_lowercase : int = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
return output_txt, output_ids
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
_lowercase : List[str] = self.ta_base_tokenizer
_lowercase : Union[str, Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
_lowercase : Tuple = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Optional[int] = self.ta_base_tokenizer
_lowercase : Tuple = 'Unicode €.'
_lowercase : List[Any] = tokenizer(UpperCamelCase_ )
_lowercase : List[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
# decoding
_lowercase : List[str] = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , 'Unicode €.</s>' )
_lowercase : Any = tokenizer('e è é ê ë' )
_lowercase : Optional[int] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
# decoding
_lowercase : Tuple = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
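        # Note on the expected ids above: ByT5 tokenizes raw UTF-8 bytes and shifts each byte
        # value by 3 to leave room for the special tokens (pad=0, eos=1, unk=2); e.g. 'U' is
        # byte 85 -> id 88, and '€' (UTF-8 bytes 226, 130, 172) becomes 229, 133, 175.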
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
'''simple docstring'''
_lowercase : List[Any] = self.ta_base_tokenizer
_lowercase : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_lowercase : Any = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_lowercase : Dict = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
if FRAMEWORK != "jax":
_lowercase : Optional[Any] = list(batch.input_ids.numpy()[0] )
else:
_lowercase : List[str] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def __UpperCAmelCase ( self : Optional[int] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = self.ta_base_tokenizer
_lowercase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowercase : str = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , UpperCamelCase_ )
self.assertIn('attention_mask' , UpperCamelCase_ )
self.assertNotIn('decoder_input_ids' , UpperCamelCase_ )
self.assertNotIn('decoder_attention_mask' , UpperCamelCase_ )
def __UpperCAmelCase ( self : Any ) -> int:
'''simple docstring'''
_lowercase : Tuple = self.ta_base_tokenizer
_lowercase : Optional[Any] = [
'Summary of the text.',
'Another summary.',
]
_lowercase : str = tokenizer(
text_target=UpperCamelCase_ , max_length=32 , padding='max_length' , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def __UpperCAmelCase ( self : Dict ) -> Tuple:
'''simple docstring'''
_lowercase : str = self.ta_base_tokenizer
_lowercase : str = ['A long paragraph for summarization. </s>']
_lowercase : Optional[int] = ['Summary of the text. </s>']
# fmt: off
_lowercase : Optional[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_lowercase : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_lowercase : Any = tokenizer(UpperCamelCase_ , text_target=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , batch['input_ids'][0] )
self.assertEqual(UpperCamelCase_ , batch['labels'][0] )
def __UpperCAmelCase ( self : List[str] ) -> int:
'''simple docstring'''
_lowercase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_lowercase : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : List[Any] = tempfile.mkdtemp()
_lowercase : Any = ' He is very happy, UNwant\u00E9d,running'
_lowercase : Union[str, Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowercase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
_lowercase : Tuple = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
shutil.rmtree(UpperCamelCase_ )
_lowercase : str = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : Dict = tempfile.mkdtemp()
_lowercase : List[Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_lowercase : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_lowercase : Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowercase : List[str] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
_lowercase : Dict = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_lowercase : Dict = tokenizer.__class__.from_pretrained(UpperCamelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCamelCase_ )
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
'''simple docstring'''
_lowercase : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_lowercase : int = json.load(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_lowercase : Tuple = json.load(UpperCamelCase_ )
_lowercase : List[Any] = [F'''<extra_id_{i}>''' for i in range(125 )]
_lowercase : Any = added_tokens_extra_ids + [
'an_additional_special_token'
]
_lowercase : int = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_lowercase : Optional[Any] = tokenizer_class.from_pretrained(
UpperCamelCase_ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_lowercase : List[str] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=UpperCamelCase_ )]
_lowercase : Tuple = tokenizer_class.from_pretrained(
UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def __UpperCAmelCase ( self : List[str] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_ )
_lowercase : str = tokenizer_class.from_pretrained(UpperCamelCase_ )
self.assertTrue(tokenizer.decode([255] ) == '' )
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : str ) -> Tuple:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = self.get_tokenizers(fast=UpperCamelCase_ , do_lower_case=UpperCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowercase : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
_lowercase : Tuple = tokenizer.convert_tokens_to_string(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
_lowercase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowercase : Optional[int] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
_lowercase : Optional[int] = 0
_lowercase : int = tokenizer.convert_ids_to_tokens(
UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
for attr in attributes_list:
setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [] )
setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
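# Hedged round-trip sketch of the byte-level scheme these tests exercise (checkpoint as used by
# the `ta_base_tokenizer` property above):
#   tok = ByTaTokenizer.from_pretrained('google/byt5-small')
#   ids = tok('hi').input_ids          # [107, 108, 1]: 'h'(104)+3, 'i'(105)+3, then eos id 1
#   assert tok.decode(ids) == 'hi</s>'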
| 4 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : str=3 , UpperCamelCase_ : Optional[Any]=18 , UpperCamelCase_ : Tuple=30 , UpperCamelCase_ : Any=400 , UpperCamelCase_ : Any=True , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[int]=True , ) -> Any:
'''simple docstring'''
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict ( self : Optional[int] ) -> Dict:
        '''simple docstring'''
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCamelCase__ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
'''simple docstring'''
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )
@property
    def image_processor_dict ( self : Tuple ) -> Dict:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
_lowercase : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowercase , 'do_resize' ) )
self.assertTrue(hasattr(__lowercase , 'size' ) )
self.assertTrue(hasattr(__lowercase , 'apply_ocr' ) )
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
'''simple docstring'''
_lowercase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
_lowercase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def __UpperCAmelCase ( self : Any ) -> Tuple:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
_lowercase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowercase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , Image.Image )
# Test not batched input
_lowercase : Any = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , __lowercase )
self.assertIsInstance(encoding.boxes , __lowercase )
# Test batched
_lowercase : Optional[Any] = image_processing(__lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __UpperCAmelCase ( self : Any ) -> Tuple:
'''simple docstring'''
_lowercase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowercase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , numpify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , np.ndarray )
# Test not batched input
_lowercase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_lowercase : List[Any] = image_processing(__lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
'''simple docstring'''
_lowercase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowercase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , torchify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , torch.Tensor )
# Test not batched input
_lowercase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_lowercase : Tuple = image_processing(__lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
'''simple docstring'''
_lowercase : Union[str, Any] = LayoutLMvaImageProcessor()
from datasets import load_dataset
_lowercase : int = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
_lowercase : int = Image.open(ds[0]['file'] ).convert('RGB' )
_lowercase : Union[str, Any] = image_processing(__lowercase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_lowercase : List[str] = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
_lowercase : str = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __lowercase )
self.assertListEqual(encoding.boxes , __lowercase )
# with apply_OCR = False
_lowercase : Dict = LayoutLMvaImageProcessor(apply_ocr=__lowercase )
_lowercase : Any = image_processing(__lowercase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
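# Hedged usage sketch of the two OCR modes exercised above (same transformers API):
#   processor = LayoutLMvaImageProcessor()                        # apply_ocr=True: pixel_values + words/boxes
#   processor_no_ocr = LayoutLMvaImageProcessor(apply_ocr=False)  # pixel_values only, Tesseract not needed
#   encoding = processor_no_ocr(image, return_tensors='pt')       # pixel_values of shape (1, 3, 224, 224)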
| 714 |
'''simple docstring'''
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
black_avoid_patterns = {
    '''{processor_class}''': '''FakeProcessorClass''',
    '''{model_class}''': '''FakeModelClass''',
    '''{object_class}''': '''FakeObjectClass''',
}
| 4 | 0 |
'''simple docstring'''
import math
def fx(x: float, a: float) -> float:
    # f(x) = x^2 - a has sqrt(a) as its positive root
    return math.pow(x, 2) - a
def fx_derivative(x: float) -> float:
    # f'(x) = 2 * x
    return 2 * x
def get_initial_point(a: float) -> float:
    # seed Newton's method with the first repeated square of 2 that exceeds `a`
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start
def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001
) -> float:
    """Square root of `a` via Newton's method on f(x) = x^2 - a."""
    if a < 0:
        raise ValueError('math domain error' )
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
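# Quick sanity checks, consistent with the default tolerance above:
#   >>> round(square_root_iterative(4), 10)
#   2.0
#   >>> round(square_root_iterative(3.2), 4)
#   1.7889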
| 715 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x ):
    # entropy of a pre-softmax logit tensor, used as DeeBERT's early-exit confidence signal
    exp_x = torch.exp(x )
    A = torch.sum(exp_x , dim=1 )  # sum of exp(x_i)
    B = torch.sum(x * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
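# For reference: with p = softmax(x), the Shannon entropy is
#   H(p) = -sum_i p_i * log(p_i) = log(sum_i e^{x_i}) - (sum_i x_i * e^{x_i}) / (sum_i e^{x_i}),
# i.e. exactly log(A) - B / A as computed above.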
class lowerCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase_ : List[str] ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
_lowercase : int = config.output_attentions
_lowercase : int = config.output_hidden_states
_lowercase : Union[str, Any] = nn.ModuleList([BertLayer(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] )
_lowercase : List[Any] = nn.ModuleList([BertHighway(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] )
_lowercase : Tuple = [-1 for _ in range(config.num_hidden_layers )]
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : str ) -> int:
'''simple docstring'''
if (type(UpperCamelCase_ ) is float) or (type(UpperCamelCase_ ) is int):
for i in range(len(self.early_exit_entropy ) ):
_lowercase : Optional[Any] = x
else:
_lowercase : Optional[int] = x
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : List[Any] ) -> Dict:
'''simple docstring'''
_lowercase : Optional[int] = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def __UpperCAmelCase ( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[Any]=None , ) -> Optional[int]:
'''simple docstring'''
_lowercase : int = ()
_lowercase : List[Any] = ()
_lowercase : Tuple = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_lowercase : Optional[int] = all_hidden_states + (hidden_states,)
_lowercase : str = layer_module(
UpperCamelCase_ , UpperCamelCase_ , head_mask[i] , UpperCamelCase_ , UpperCamelCase_ )
_lowercase : List[str] = layer_outputs[0]
if self.output_attentions:
_lowercase : Tuple = all_attentions + (layer_outputs[1],)
_lowercase : Optional[int] = (hidden_states,)
if self.output_hidden_states:
_lowercase : str = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowercase : Optional[int] = current_outputs + (all_attentions,)
_lowercase : List[Any] = self.highway[i](UpperCamelCase_ )
# logits, pooled_output
if not self.training:
_lowercase : Dict = highway_exit[0]
_lowercase : Tuple = entropy(UpperCamelCase_ )
_lowercase : Dict = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowercase : str = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
_lowercase : Tuple = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(UpperCamelCase_ , i + 1 )
else:
_lowercase : Optional[int] = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowercase : str = all_hidden_states + (hidden_states,)
_lowercase : Optional[Any] = (hidden_states,)
if self.output_hidden_states:
_lowercase : Dict = outputs + (all_hidden_states,)
if self.output_attentions:
_lowercase : Optional[Any] = outputs + (all_attentions,)
_lowercase : Optional[int] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    """The Bert Model transformer with early exiting (DeeBERT). """ , BERT_START_DOCSTRING , )
class lowerCamelCase__ ( BertPreTrainedModel ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
super().__init__(UpperCamelCase_ )
_lowercase : int = config
_lowercase : int = BertEmbeddings(UpperCamelCase_ )
_lowercase : List[Any] = DeeBertEncoder(UpperCamelCase_ )
_lowercase : Any = BertPooler(UpperCamelCase_ )
self.init_weights()
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
return self.embeddings.word_embeddings
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict ) -> Any:
'''simple docstring'''
_lowercase : Optional[Any] = value
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> Union[str, Any]:
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(UpperCamelCase_ )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : int=None , UpperCamelCase_ : Tuple=None , ) -> Union[str, Any]:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
_lowercase : Any = input_ids.size()
elif inputs_embeds is not None:
_lowercase : Any = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
_lowercase : str = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowercase : Tuple = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
if encoder_attention_mask is None:
_lowercase : Dict = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
if token_type_ids is None:
_lowercase : int = torch.zeros(UpperCamelCase_ , dtype=torch.long , device=UpperCamelCase_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowercase : torch.Tensor = self.get_extended_attention_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowercase : int = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowercase : int = encoder_attention_mask[:, None, None, :]
_lowercase : str = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
        _lowercase : Optional[int] = (1.0 - encoder_extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowercase : Optional[int] = self.get_head_mask(UpperCamelCase_ , self.config.num_hidden_layers )
_lowercase : Dict = self.embeddings(
input_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ )
_lowercase : List[Any] = self.encoder(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )
_lowercase : int = encoder_outputs[0]
_lowercase : str = self.pooler(UpperCamelCase_ )
_lowercase : List[Any] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class lowerCamelCase__ ( Exception ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Any = message
_lowercase : Dict = exit_layer # start from 1!
class lowerCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : List[str] ) -> Dict:
'''simple docstring'''
super().__init__()
_lowercase : Optional[Any] = BertPooler(UpperCamelCase_ )
_lowercase : List[Any] = nn.Dropout(config.hidden_dropout_prob )
_lowercase : int = nn.Linear(config.hidden_size , config.num_labels )
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Optional[int] ) -> List[Any]:
'''simple docstring'''
_lowercase : str = encoder_outputs[0]
_lowercase : int = self.pooler(UpperCamelCase_ )
# "return" pooler_output
# BertModel
_lowercase : Optional[int] = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
_lowercase : Dict = bmodel_output[1]
_lowercase : Union[str, Any] = self.dropout(UpperCamelCase_ )
_lowercase : str = self.classifier(UpperCamelCase_ )
return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """ , BERT_START_DOCSTRING , )
class lowerCamelCase__ ( BertPreTrainedModel ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : List[Any] ) -> List[str]:
'''simple docstring'''
super().__init__(UpperCamelCase_ )
_lowercase : Dict = config.num_labels
_lowercase : Any = config.num_hidden_layers
_lowercase : Optional[int] = DeeBertModel(UpperCamelCase_ )
_lowercase : Any = nn.Dropout(config.hidden_dropout_prob )
_lowercase : Optional[Any] = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : str=-1 , UpperCamelCase_ : Union[str, Any]=False , ) -> Tuple:
'''simple docstring'''
_lowercase : Union[str, Any] = self.num_layers
try:
_lowercase : Tuple = self.bert(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_lowercase : List[Any] = outputs[1]
_lowercase : int = self.dropout(UpperCamelCase_ )
_lowercase : Optional[int] = self.classifier(UpperCamelCase_ )
_lowercase : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowercase : Union[str, Any] = e.message
_lowercase : Any = e.exit_layer
_lowercase : Optional[int] = outputs[0]
if not self.training:
_lowercase : Union[str, Any] = entropy(UpperCamelCase_ )
_lowercase : Tuple = []
_lowercase : Tuple = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowercase : Tuple = MSELoss()
_lowercase : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_lowercase : Union[str, Any] = CrossEntropyLoss()
_lowercase : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_lowercase : Optional[Any] = []
for highway_exit in outputs[-1]:
_lowercase : Optional[Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(UpperCamelCase_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_lowercase : Union[str, Any] = MSELoss()
_lowercase : Any = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_lowercase : Dict = CrossEntropyLoss()
_lowercase : Optional[int] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(UpperCamelCase_ )
if train_highway:
_lowercase : List[str] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_lowercase : Optional[Any] = (loss,) + outputs
if not self.training:
_lowercase : List[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowercase : Dict = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
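# Hedged note on the early-exit flow above: at inference time each intermediate "highway" head
# produces logits whose entropy is compared against that layer's threshold in
# `early_exit_entropy`; once the entropy drops below it, `HighwayException` short-circuits the
# remaining layers and the classifier returns the early logits together with the exit layer index.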
| 4 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCamelCase__ ( PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"""height""", """width""", """guidance_scale"""}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        """generator""",
        """return_dict""",
        """decoder_num_inference_steps""",
        """super_res_num_inference_steps""",
    ]
    test_xformers_attention = False
@property
def __UpperCAmelCase ( self : List[str] ) -> Dict:
'''simple docstring'''
return 32
@property
def __UpperCAmelCase ( self : Dict ) -> str:
'''simple docstring'''
return 32
@property
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
'''simple docstring'''
return self.time_input_dim
@property
def __UpperCAmelCase ( self : Tuple ) -> int:
'''simple docstring'''
return self.time_input_dim * 4
@property
def __UpperCAmelCase ( self : str ) -> List[Any]:
'''simple docstring'''
return 100
@property
def __UpperCAmelCase ( self : Tuple ) -> Tuple:
'''simple docstring'''
_lowercase : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
_lowercase : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(UpperCamelCase_ )
@property
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
_lowercase : Optional[int] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(UpperCamelCase_ )
@property
def __UpperCAmelCase ( self : int ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
_lowercase : Union[str, Any] = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
_lowercase : int = UnCLIPTextProjModel(**UpperCamelCase_ )
return model
@property
def __UpperCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
_lowercase : Tuple = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double in channels because predicts mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
_lowercase : Dict = UNetaDConditionModel(**UpperCamelCase_ )
return model
@property
def __UpperCAmelCase ( self : Any ) -> Tuple:
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def __UpperCAmelCase ( self : List[str] ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
_lowercase : Tuple = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(1 )
_lowercase : Union[str, Any] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_lowercase : Dict = self.dummy_decoder
_lowercase : Tuple = self.dummy_text_proj
_lowercase : Dict = self.dummy_text_encoder
_lowercase : Dict = self.dummy_tokenizer
_lowercase : Dict = self.dummy_super_res_first
_lowercase : Dict = self.dummy_super_res_last
_lowercase : List[str] = UnCLIPScheduler(
variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1000 , )
_lowercase : str = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1000 , )
_lowercase : List[str] = CLIPImageProcessor(crop_size=32 , size=32 )
_lowercase : Optional[Any] = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any]=0 , UpperCamelCase_ : List[Any]=True ) -> Optional[int]:
'''simple docstring'''
_lowercase : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
if str(UpperCamelCase_ ).startswith('mps' ):
_lowercase : str = torch.manual_seed(UpperCamelCase_ )
else:
_lowercase : List[str] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
if pil_image:
_lowercase : Tuple = input_image * 0.5 + 0.5
_lowercase : str = input_image.clamp(0 , 1 )
_lowercase : List[str] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
_lowercase : Optional[int] = DiffusionPipeline.numpy_to_pil(UpperCamelCase_ )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def __UpperCAmelCase ( self : Dict ) -> str:
'''simple docstring'''
_lowercase : List[str] = 'cpu'
_lowercase : str = self.get_dummy_components()
_lowercase : List[str] = self.pipeline_class(**UpperCamelCase_ )
_lowercase : int = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
_lowercase : List[Any] = self.get_dummy_inputs(UpperCamelCase_ , pil_image=UpperCamelCase_ )
_lowercase : Dict = pipe(**UpperCamelCase_ )
_lowercase : Union[str, Any] = output.images
_lowercase : Union[str, Any] = self.get_dummy_inputs(UpperCamelCase_ , pil_image=UpperCamelCase_ )
_lowercase : List[Any] = pipe(
**UpperCamelCase_ , return_dict=UpperCamelCase_ , )[0]
_lowercase : Tuple = image[0, -3:, -3:, -1]
_lowercase : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowercase : Union[str, Any] = np.array(
[
0.99_97,
0.00_02,
0.99_97,
0.99_97,
0.99_69,
0.00_23,
0.99_97,
0.99_69,
0.99_70,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Optional[Any] = 'cpu'
_lowercase : str = self.get_dummy_components()
_lowercase : int = self.pipeline_class(**UpperCamelCase_ )
_lowercase : List[str] = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
_lowercase : Dict = self.get_dummy_inputs(UpperCamelCase_ , pil_image=UpperCamelCase_ )
_lowercase : int = pipe(**UpperCamelCase_ )
_lowercase : Dict = output.images
_lowercase : Optional[int] = self.get_dummy_inputs(UpperCamelCase_ , pil_image=UpperCamelCase_ )
_lowercase : Union[str, Any] = pipe(
**UpperCamelCase_ , return_dict=UpperCamelCase_ , )[0]
_lowercase : Optional[Any] = image[0, -3:, -3:, -1]
_lowercase : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowercase : str = np.array([0.99_97, 0.00_03, 0.99_97, 0.99_97, 0.99_70, 0.00_24, 0.99_97, 0.99_71, 0.99_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[int] = 'cpu'
_lowercase : Optional[int] = self.get_dummy_components()
_lowercase : Any = self.pipeline_class(**UpperCamelCase_ )
_lowercase : Optional[int] = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
_lowercase : Tuple = self.get_dummy_inputs(UpperCamelCase_ , pil_image=UpperCamelCase_ )
_lowercase : List[str] = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
_lowercase : List[Any] = pipe(**UpperCamelCase_ )
_lowercase : Union[str, Any] = output.images
_lowercase : Union[str, Any] = self.get_dummy_inputs(UpperCamelCase_ , pil_image=UpperCamelCase_ )
_lowercase : Union[str, Any] = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
_lowercase : Tuple = pipe(
**UpperCamelCase_ , return_dict=UpperCamelCase_ , )[0]
_lowercase : List[Any] = image[0, -3:, -3:, -1]
_lowercase : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
_lowercase : Dict = np.array(
[
0.99_97,
0.99_89,
0.00_08,
0.00_21,
0.99_60,
0.00_18,
0.00_14,
0.00_02,
0.99_33,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_lowercase : Optional[int] = torch.device('cpu' )
class lowerCamelCase__ :
'''simple docstring'''
A_ = 1
_lowercase : Tuple = self.get_dummy_components()
_lowercase : List[str] = self.pipeline_class(**UpperCamelCase_ )
_lowercase : str = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
_lowercase : Optional[int] = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
_lowercase : Dict = pipe.decoder.dtype
_lowercase : Tuple = 1
_lowercase : Union[str, Any] = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
_lowercase : Tuple = pipe.prepare_latents(
UpperCamelCase_ , dtype=UpperCamelCase_ , device=UpperCamelCase_ , generator=UpperCamelCase_ , latents=UpperCamelCase_ , scheduler=DummyScheduler() )
_lowercase : Any = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
_lowercase : List[Any] = pipe.prepare_latents(
UpperCamelCase_ , dtype=UpperCamelCase_ , device=UpperCamelCase_ , generator=UpperCamelCase_ , latents=UpperCamelCase_ , scheduler=DummyScheduler() )
_lowercase : Union[str, Any] = self.get_dummy_inputs(UpperCamelCase_ , pil_image=UpperCamelCase_ )
_lowercase : List[Any] = pipe(
**UpperCamelCase_ , decoder_latents=UpperCamelCase_ , super_res_latents=UpperCamelCase_ ).images
_lowercase : Optional[int] = self.get_dummy_inputs(UpperCamelCase_ , pil_image=UpperCamelCase_ )
# Don't pass image, instead pass embedding
_lowercase : Optional[Any] = pipeline_inputs.pop('image' )
_lowercase : Optional[int] = pipe.image_encoder(UpperCamelCase_ ).image_embeds
_lowercase : Optional[Any] = pipe(
**UpperCamelCase_ , decoder_latents=UpperCamelCase_ , super_res_latents=UpperCamelCase_ , image_embeddings=UpperCamelCase_ , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def __UpperCAmelCase ( self : Optional[Any] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = torch_device == 'cpu'
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
_lowercase : Any = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=UpperCamelCase_ , expected_max_diff=UpperCamelCase_ )
@skip_mps
def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
'''simple docstring'''
_lowercase : List[Any] = torch_device == 'cpu'
_lowercase : List[str] = True
_lowercase : List[str] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=UpperCamelCase_ , relax_max_difference=UpperCamelCase_ , additional_params_copy_to_batched_inputs=UpperCamelCase_ , )
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
'''simple docstring'''
_lowercase : Dict = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
_lowercase : Dict = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=UpperCamelCase_ , additional_params_copy_to_batched_inputs=UpperCamelCase_ , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=UpperCamelCase_ )
@skip_mps
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def __UpperCAmelCase ( self : str ) -> List[Any]:
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
_lowercase : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
_lowercase : Any = UnCLIPImageVariationPipeline.from_pretrained(
'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa )
_lowercase : Optional[int] = pipeline.to(UpperCamelCase_ )
pipeline.set_progress_bar_config(disable=UpperCamelCase_ )
_lowercase : str = torch.Generator(device='cpu' ).manual_seed(0 )
_lowercase : Any = pipeline(
UpperCamelCase_ , generator=UpperCamelCase_ , output_type='np' , )
_lowercase : int = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ , 15 )
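# Hedged end-to-end sketch mirroring the slow test above (same checkpoint and call signature;
# needs a CUDA device for the fp16 weights):
#   pipe = UnCLIPImageVariationPipeline.from_pretrained(
#       'kakaobrain/karlo-v1-alpha-image-variations', torch_dtype=torch.float16).to('cuda')
#   out = pipe(init_image, generator=torch.Generator(device='cpu').manual_seed(0), output_type='np')
#   image = out.images[0]   # numpy array of shape (256, 256, 3)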
| 716 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
    def __UpperCAmelCase ( self : int ) -> Any:
        '''simple docstring'''
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit , weight , max_weight ) , 210 )
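        # Why 210: the items weigh 2+4+6+8+10+12 = 42 in total, which fits within
        # max_weight = 100, so the greedy fractional knapsack takes every item in full
        # and the gain is 10+20+30+40+50+60 = 210.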
def __UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
        self.assertRaisesRegex(ValueError , 'max_weight must greater than zero.' )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
        self.assertRaisesRegex(ValueError , 'Weight can not be negative.' )
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
        self.assertRaisesRegex(ValueError , 'Profit can not be negative.' )
def __UpperCAmelCase ( self : int ) -> List[str]:
'''simple docstring'''
        self.assertRaisesRegex(ValueError , 'max_weight must greater than zero.' )
def __UpperCAmelCase ( self : int ) -> List[Any]:
'''simple docstring'''
        self.assertRaisesRegex(
            ValueError , 'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 4 | 0 |
'''simple docstring'''
import warnings
from .generation import TFGenerationMixin
class lowerCamelCase__ ( TFGenerationMixin ):
    '''simple docstring'''
    warnings.warn(
        """Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will """
        """be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.""" , FutureWarning , )
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xlnet'''] = ['''XLNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xlnet_fast'''] = ['''XLNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Any =[
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str =[
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
_A : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 | 0 |
'''simple docstring'''
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    if len(profit) != len(weight):
        raise ValueError('The length of profit and weight must be the same.')
    if max_weight <= 0:
        raise ValueError('max_weight must be greater than zero.')
    if any(p < 0 for p in profit):
        raise ValueError('Profit cannot be negative.')
    if any(w < 0 for w in weight):
        raise ValueError('Weight cannot be negative.')

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0
    # loop till the total weight reaches the max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # pick the remaining item with the greatest profit/weight ratio
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # mark this item as used
        # check if the whole item fits into the remaining capacity
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight:
            # 1 == weight[index] / weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than the limit, take only
            # the required number of remaining kgs and the proportional profit:
            # (weight remaining / weight[index]) * profit[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
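# Worked example for the greedy strategy above:
#   calc_profit([10, 20, 30], [2, 4, 6], 5) takes the 2 kg item whole and then
#   3/4 of the 4 kg item (all ratios tie at 5.0), returning 10 + 0.75 * 20 == 25.0.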
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
_A : Optional[int] =[int(x) for x in input('''Input profits separated by spaces: ''').split()]
_A : Any =[int(x) for x in input('''Input weights separated by spaces: ''').split()]
_A : int =int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight)
| 718 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Optional[Any] =logging.get_logger(__name__)
_A : Optional[int] ={
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class lowerCamelCase__ ( PretrainedConfig ):
'''simple docstring'''
A_ = """markuplm"""
def __init__( self : int , UpperCamelCase_ : Optional[Any]=3_0522 , UpperCamelCase_ : Optional[Any]=768 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Union[str, Any]=12 , UpperCamelCase_ : Tuple=3072 , UpperCamelCase_ : Union[str, Any]="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Dict=512 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Any=0.02 , UpperCamelCase_ : Optional[Any]=1E-12 , UpperCamelCase_ : List[str]=0 , UpperCamelCase_ : Optional[int]=0 , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : str=256 , UpperCamelCase_ : Optional[Any]=1024 , UpperCamelCase_ : Union[str, Any]=216 , UpperCamelCase_ : int=1001 , UpperCamelCase_ : int=32 , UpperCamelCase_ : int=50 , UpperCamelCase_ : str="absolute" , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Optional[int]=None , **UpperCamelCase_ : Any , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
_lowercase : List[Any] = vocab_size
_lowercase : Union[str, Any] = hidden_size
_lowercase : Dict = num_hidden_layers
_lowercase : Optional[Any] = num_attention_heads
_lowercase : Dict = hidden_act
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[int] = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : List[Any] = type_vocab_size
_lowercase : Union[str, Any] = initializer_range
_lowercase : Optional[int] = layer_norm_eps
_lowercase : Optional[Any] = position_embedding_type
_lowercase : str = use_cache
_lowercase : str = classifier_dropout
# additional properties
_lowercase : int = max_depth
_lowercase : Dict = max_xpath_tag_unit_embeddings
_lowercase : str = max_xpath_subs_unit_embeddings
_lowercase : List[str] = tag_pad_id
_lowercase : Optional[int] = subs_pad_id
_lowercase : Any = xpath_unit_hidden_size
| 4 | 0 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 719 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def __UpperCamelCase ( _lowercase ) -> Tuple:
_lowercase : Tuple = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_lowercase : Optional[Any] = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
_lowercase : Tuple = 4
_lowercase : Union[str, Any] = 48
_lowercase : Any = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_lowercase : Dict = [6, 6, 6, 6]
_lowercase : Optional[int] = 60
_lowercase : List[str] = [6, 6, 6, 6]
_lowercase : Dict = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_lowercase : str = 4
_lowercase : str = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
_lowercase : str = 1
_lowercase : Tuple = 1
_lowercase : Dict = 126
_lowercase : Optional[int] = 7
_lowercase : List[Any] = 2_5_5.0
_lowercase : Tuple = ''
return config
def __UpperCamelCase ( _lowercase, _lowercase ) -> str:
if "patch_embed.proj" in name and "layers" not in name:
_lowercase : Tuple = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
_lowercase : Union[str, Any] = name.replace('patch_embed.norm', 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
_lowercase : Tuple = name.replace('layers', 'encoder.stages' )
if "residual_group.blocks" in name:
_lowercase : str = name.replace('residual_group.blocks', 'layers' )
if "attn.proj" in name:
_lowercase : str = name.replace('attn.proj', 'attention.output.dense' )
if "attn" in name:
_lowercase : List[Any] = name.replace('attn', 'attention.self' )
if "norm1" in name:
_lowercase : List[str] = name.replace('norm1', 'layernorm_before' )
if "norm2" in name:
_lowercase : Tuple = name.replace('norm2', 'layernorm_after' )
if "mlp.fc1" in name:
_lowercase : int = name.replace('mlp.fc1', 'intermediate.dense' )
if "mlp.fc2" in name:
_lowercase : List[str] = name.replace('mlp.fc2', 'output.dense' )
if "q_bias" in name:
_lowercase : Optional[Any] = name.replace('q_bias', 'query.bias' )
if "k_bias" in name:
_lowercase : str = name.replace('k_bias', 'key.bias' )
if "v_bias" in name:
_lowercase : int = name.replace('v_bias', 'value.bias' )
if "cpb_mlp" in name:
_lowercase : Any = name.replace('cpb_mlp', 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
_lowercase : Union[str, Any] = name.replace('patch_embed.proj', 'patch_embed.projection' )
if name == "norm.weight":
_lowercase : Union[str, Any] = 'layernorm.weight'
if name == "norm.bias":
_lowercase : List[Any] = 'layernorm.bias'
if "conv_first" in name:
_lowercase : Tuple = name.replace('conv_first', 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
_lowercase : List[str] = name.replace('conv_last', 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
_lowercase : Union[str, Any] = name.replace('conv_before_upsample.0', 'conv_before_upsample' )
if "upsample.0" in name:
_lowercase : str = name.replace('upsample.0', 'upsample.convolution_0' )
if "upsample.2" in name:
_lowercase : Union[str, Any] = name.replace('upsample.2', 'upsample.convolution_1' )
_lowercase : Optional[int] = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
_lowercase : Optional[Any] = name.replace('upsample.0.weight', 'upsample.conv.weight' )
_lowercase : str = name.replace('upsample.0.bias', 'upsample.conv.bias' )
else:
pass
else:
_lowercase : Tuple = 'swin2sr.' + name
return name
def __UpperCamelCase ( _lowercase, _lowercase ) -> List[str]:
for key in orig_state_dict.copy().keys():
_lowercase : int = orig_state_dict.pop(_lowercase )
if "qkv" in key:
_lowercase : Tuple = key.split('.' )
_lowercase : Optional[Any] = int(key_split[1] )
_lowercase : Any = int(key_split[4] )
_lowercase : Optional[Any] = config.embed_dim
if "weight" in key:
_lowercase : Optional[int] = val[:dim, :]
_lowercase : int = val[dim : dim * 2, :]
_lowercase : int = val[-dim:, :]
else:
_lowercase : Optional[Any] = val[:dim]
_lowercase : Tuple = val[dim : dim * 2]
_lowercase : List[str] = val[-dim:]
else:
_lowercase : List[Any] = val
return orig_state_dict
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Union[str, Any]:
_lowercase : Optional[Any] = get_config(_lowercase )
_lowercase : Union[str, Any] = SwinaSRForImageSuperResolution(_lowercase )
model.eval()
_lowercase : List[Any] = torch.hub.load_state_dict_from_url(_lowercase, map_location='cpu' )
_lowercase : Any = convert_state_dict(_lowercase, _lowercase )
_lowercase , _lowercase : str = model.load_state_dict(_lowercase, strict=_lowercase )
if len(_lowercase ) > 0:
raise ValueError('Missing keys when converting: {}'.format(_lowercase ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f'''Unexpected key {key} in state_dict''' )
# verify values
_lowercase : str = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
_lowercase : Any = Image.open(requests.get(_lowercase, stream=_lowercase ).raw ).convert('RGB' )
_lowercase : Tuple = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
_lowercase : Tuple = 126 if 'Jpeg' in checkpoint_url else 256
_lowercase : List[str] = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6], std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
_lowercase : Optional[Any] = transforms(_lowercase ).unsqueeze(0 )
if config.num_channels == 1:
_lowercase : Any = pixel_values[:, 0, :, :].unsqueeze(1 )
_lowercase : Optional[int] = model(_lowercase )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
_lowercase : Any = torch.Size([1, 3, 512, 512] )
_lowercase : Tuple = torch.tensor(
[[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_lowercase : Optional[Any] = torch.Size([1, 3, 1024, 1024] )
_lowercase : int = torch.tensor(
[[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
_lowercase : Optional[int] = torch.Size([1, 3, 1024, 1024] )
_lowercase : Dict = torch.tensor(
[[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_lowercase : List[str] = torch.Size([1, 3, 512, 512] )
_lowercase : int = torch.tensor(
[[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_lowercase : Any = torch.Size([1, 3, 1024, 1024] )
_lowercase : Union[str, Any] = torch.tensor(
[[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] )
assert (
outputs.reconstruction.shape == expected_shape
), f'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'''
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], _lowercase, atol=1E-3 )
print('Looks ok!' )
_lowercase : List[str] = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
_lowercase : int = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_lowercase )
if push_to_hub:
model.push_to_hub(f'''caidas/{model_name}''' )
processor.push_to_hub(f'''caidas/{model_name}''' )
if __name__ == "__main__":
_A : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
_A : int =parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 4 | 0 |
'''simple docstring'''
def selection_sort(collection: list) -> list:
    length = len(collection)
    for i in range(length - 1):
        # assume the element at position i is the smallest of the unsorted suffix
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            # swap the found minimum into position i
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
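# The sort is in place (the input list is mutated) and runs in O(n^2)
# comparisons with O(1) extra space; the list is also returned for convenience.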
if __name__ == "__main__":
_A : Union[str, Any] =input('''Enter numbers separated by a comma:\n''').strip()
_A : List[Any] =[int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
| 720 |
'''simple docstring'''
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(inner_word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
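# Worked example (full justification with max_width 16):
#   text_justification('This is an example of text justification.', 16)
#   -> ['This    is    an', 'example  of text', 'justification.  ']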
if __name__ == "__main__":
from doctest import testmod
testmod()
| 4 | 0 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self, list_of_points: list) -> None:
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values = []
        for i in range(len(self.list_of_points)):
            # basis function (Bernstein polynomial) for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01) -> None:
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x = []  # x coordinates of points to plot
        to_plot_y = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x, to_plot_y, color='blue', label='Curve of Degree ' + str(self.degree)
        )
        plt.scatter(x, y, color='red', label='Control Points')
        plt.legend()
        plt.show()
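# Worked example: a degree-1 (linear) curve through (1, 2) and (3, 5) evaluated
# halfway gives the midpoint:
#   BezierCurve([(1, 2), (3, 5)]).bezier_curve_function(0.5) == (2.0, 3.5)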
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 721 |
'''simple docstring'''
import os
from collections.abc import Iterator
def __UpperCamelCase ( _lowercase = "." ) -> Iterator[str]:
for dir_path, dir_names, filenames in os.walk(_lowercase ):
_lowercase : Optional[int] = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(_lowercase )[1] in (".py", ".ipynb"):
yield os.path.join(_lowercase, _lowercase ).lstrip('./' )
def __UpperCamelCase ( _lowercase ) -> List[str]:
return f'''{i * " "}*''' if i else "\n##"
def __UpperCamelCase ( _lowercase, _lowercase ) -> str:
_lowercase : Optional[Any] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(_lowercase ) or old_parts[i] != new_part) and new_part:
print(f'''{md_prefix(_lowercase )} {new_part.replace("_", " " ).title()}''' )
return new_path
def __UpperCamelCase ( _lowercase = "." ) -> None:
_lowercase : Dict = ''
for filepath in sorted(good_file_paths(_lowercase ) ):
_lowercase , _lowercase : Optional[Any] = os.path.split(_lowercase )
if filepath != old_path:
_lowercase : Dict = print_path(_lowercase, _lowercase )
_lowercase : Optional[int] = (filepath.count(os.sep ) + 1) if filepath else 0
_lowercase : Dict = f'''{filepath}/{filename}'''.replace(' ', '%20' )
_lowercase : Optional[int] = os.path.splitext(filename.replace('_', ' ' ).title() )[0]
print(f'''{md_prefix(_lowercase )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md('''.''')
| 4 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_A : Union[str, Any] =logging.get_logger(__name__)
_A : Optional[Any] ={
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class lowerCamelCase__ ( BackboneConfigMixin , PretrainedConfig ):
'''simple docstring'''
A_ = 'resnet'
A_ = ['basic', 'bottleneck']
def __init__( self : List[Any] , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : str=64 , UpperCamelCase_ : Optional[int]=[256, 512, 1024, 2048] , UpperCamelCase_ : str=[3, 4, 6, 3] , UpperCamelCase_ : Any="bottleneck" , UpperCamelCase_ : List[str]="relu" , UpperCamelCase_ : str=False , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Union[str, Any]=None , **UpperCamelCase_ : str , ) -> str:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
if layer_type not in self.layer_types:
raise ValueError(F'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
_lowercase : Optional[int] = num_channels
_lowercase : Tuple = embedding_size
_lowercase : Any = hidden_sizes
_lowercase : Union[str, Any] = depths
_lowercase : List[str] = layer_type
_lowercase : List[str] = hidden_act
_lowercase : List[str] = downsample_in_first_stage
_lowercase : List[str] = ['stem'] + [F'''stage{idx}''' for idx in range(1 , len(UpperCamelCase_ ) + 1 )]
_lowercase , _lowercase : Optional[Any] = get_aligned_output_features_output_indices(
out_features=UpperCamelCase_ , out_indices=UpperCamelCase_ , stage_names=self.stage_names )
class lowerCamelCase__ ( OnnxConfig ):
'''simple docstring'''
A_ = version.parse("""1.11""" )
@property
def __UpperCAmelCase ( self : Any ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __UpperCAmelCase ( self : Optional[int] ) -> float:
'''simple docstring'''
return 1E-3
| 700 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_A : Union[str, Any] ={'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Dict =['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Union[str, Any] =['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str =[
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
_A : Union[str, Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_A : int ={
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : List[Any] =['''RobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Any =[
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Optional[Any] =[
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : List[str] =[
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
_A : Tuple =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 701 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str]=13 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Any=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : int=99 , UpperCamelCase_ : Tuple=32 , UpperCamelCase_ : List[str]=5 , UpperCamelCase_ : Dict=4 , UpperCamelCase_ : Tuple=37 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : Union[str, Any]=4 , ) -> Tuple:
'''simple docstring'''
_lowercase : int = parent
_lowercase : str = batch_size
_lowercase : List[str] = seq_length
_lowercase : Dict = is_training
_lowercase : Optional[int] = use_attention_mask
_lowercase : List[Any] = use_token_type_ids
_lowercase : Union[str, Any] = use_labels
_lowercase : Dict = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Any = num_hidden_layers
_lowercase : int = num_attention_heads
_lowercase : Optional[int] = intermediate_size
_lowercase : Any = hidden_act
_lowercase : List[str] = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : Optional[int] = max_position_embeddings
_lowercase : int = type_vocab_size
_lowercase : Any = type_sequence_label_size
_lowercase : Any = initializer_range
_lowercase : str = num_choices
def __UpperCAmelCase ( self : str ) -> int:
'''simple docstring'''
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : int = None
if self.use_attention_mask:
_lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : Any = None
if self.use_token_type_ids:
_lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : str = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
_lowercase : Dict = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = config_and_inputs
_lowercase : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class lowerCamelCase__ ( FlaxModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
A_ = True
A_ = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __UpperCAmelCase ( self : str ) -> int:
'''simple docstring'''
_lowercase : Tuple = FlaxRoFormerModelTester(self )
@slow
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_lowercase : Optional[int] = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=UpperCamelCase_ )
_lowercase : str = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase_ )
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
'''simple docstring'''
_lowercase : Dict = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
_lowercase : Any = jnp.array([[0, 1, 2, 3, 4, 5]] )
_lowercase : int = model(UpperCamelCase_ )[0]
_lowercase : Union[str, Any] = 5_0000
_lowercase : str = (1, 6, vocab_size)
self.assertEqual(output.shape , UpperCamelCase_ )
_lowercase : int = jnp.array(
[[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
| 4 | 0 |
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    # Count, for every perimeter <= max_perimeter, how many right triangles
    # with integral sides have exactly that perimeter. Starting the inner loop
    # at `base` avoids counting each (base, perpendicular) pair twice.
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(n: int = 1000) -> int:
    # Project Euler 39: the perimeter up to n with the most solutions.
    triplets = pythagorean_triple(n)
    return triplets.most_common(1)[0][0]
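# Sanity check (small case): perimeter 12 admits exactly one right triangle,
# (3, 4, 5), so pythagorean_triple(12)[12] == 1.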
if __name__ == "__main__":
print(F'''Perimeter {solution()} has maximum solutions''')
| 702 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_A : Optional[int] =logging.get_logger(__name__)
class lowerCamelCase__ ( SequenceFeatureExtractor ):
'''simple docstring'''
A_ = ["""input_features""", """is_longer"""]
def __init__( self : List[Any] , UpperCamelCase_ : List[Any]=64 , UpperCamelCase_ : int=4_8000 , UpperCamelCase_ : Union[str, Any]=480 , UpperCamelCase_ : Any=10 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[int]=0.0 , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 1_4000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ) -> Dict:
'''simple docstring'''
super().__init__(
feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , )
_lowercase : Tuple = top_db
_lowercase : Any = truncation
_lowercase : str = padding
_lowercase : int = fft_window_size
_lowercase : Any = (fft_window_size >> 1) + 1
_lowercase : int = hop_length
_lowercase : Any = max_length_s
_lowercase : str = max_length_s * sampling_rate
_lowercase : Any = sampling_rate
_lowercase : List[Any] = frequency_min
_lowercase : Tuple = frequency_max
_lowercase : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale='htk' , )
_lowercase : Any = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm='slaney' , mel_scale='slaney' , )
def __UpperCAmelCase ( self : Tuple ) -> Dict[str, Any]:
'''simple docstring'''
_lowercase : Tuple = copy.deepcopy(self.__dict__ )
_lowercase : int = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None ) -> np.ndarray:
'''simple docstring'''
_lowercase : List[str] = spectrogram(
UpperCamelCase_ , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel='dB' , )
return log_mel_spectrogram.T
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_lowercase : Tuple = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
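        # Split the valid chunk start offsets into front/middle/back thirds so the
        # fused mel input samples the beginning, middle and end of the whole clip.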
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
_lowercase : int = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
_lowercase : Union[str, Any] = [0]
# randomly choose index for each part
_lowercase : Tuple = np.random.choice(ranges[0] )
_lowercase : int = np.random.choice(ranges[1] )
_lowercase : Any = np.random.choice(ranges[2] )
_lowercase : int = mel[idx_front : idx_front + chunk_frames, :]
_lowercase : int = mel[idx_middle : idx_middle + chunk_frames, :]
_lowercase : Tuple = mel[idx_back : idx_back + chunk_frames, :]
_lowercase : List[Any] = torch.tensor(mel[None, None, :] )
_lowercase : Optional[int] = torch.nn.functional.interpolate(
UpperCamelCase_ , size=[chunk_frames, 64] , mode='bilinear' , align_corners=UpperCamelCase_ )
_lowercase : str = mel_shrink[0][0].numpy()
_lowercase : int = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : np.array , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_lowercase : Tuple = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_lowercase : Any = len(UpperCamelCase_ ) - max_length
_lowercase : Dict = np.random.randint(0 , overflow + 1 )
_lowercase : Optional[int] = waveform[idx : idx + max_length]
_lowercase : Dict = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
_lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters )
_lowercase : List[Any] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
_lowercase : Optional[int] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_lowercase : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
_lowercase : List[Any] = False
else:
_lowercase : Union[str, Any] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
_lowercase : int = True
else:
raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
else:
_lowercase : Any = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
_lowercase : List[Any] = int(max_length / len(UpperCamelCase_ ) )
_lowercase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
_lowercase : Union[str, Any] = int(max_length / len(UpperCamelCase_ ) )
_lowercase : Union[str, Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_ ) )
_lowercase : Dict = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
_lowercase : str = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters )
_lowercase : Dict = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
_lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Dict , ) -> BatchFeature:
'''simple docstring'''
_lowercase : Dict = truncation if truncation is not None else self.truncation
_lowercase : int = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
_lowercase : Optional[Any] = isinstance(UpperCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_lowercase : List[str] = is_batched_numpy or (
isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_lowercase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray ):
_lowercase : Any = np.asarray(UpperCamelCase_ , dtype=np.floataa )
elif isinstance(UpperCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowercase : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowercase : int = [np.asarray(UpperCamelCase_ )]
# convert to mel spectrogram, truncate and pad if needed.
_lowercase : Optional[Any] = [
self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_ )
for waveform in raw_speech
]
_lowercase : List[Any] = []
_lowercase : Dict = []
for mel, longer in padded_inputs:
input_mel.append(UpperCamelCase_ )
is_longer.append(UpperCamelCase_ )
if truncation == "fusion" and sum(UpperCamelCase_ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_lowercase : Optional[Any] = np.random.randint(0 , len(UpperCamelCase_ ) )
_lowercase : str = True
if isinstance(input_mel[0] , UpperCamelCase_ ):
_lowercase : str = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
_lowercase : Tuple = [[longer] for longer in is_longer]
_lowercase : Optional[Any] = {'input_features': input_mel, 'is_longer': is_longer}
_lowercase : Optional[int] = BatchFeature(UpperCamelCase_ )
if return_tensors is not None:
_lowercase : List[Any] = input_features.convert_to_tensors(UpperCamelCase_ )
return input_features
| 4 | 0 |
'''simple docstring'''
from copy import deepcopy
class BinaryIndexedTree:
    """Fenwick tree over an array a[0:size]; tree[0] stores a[0] separately."""

    def __init__(self, arr=None, size=None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError('Either arr or size must be specified')

    def init(self, arr) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list:
        # invert init(): recover the plain array from the tree
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        # next index whose range covers `index`: add the lowest set bit
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        # parent interval: strip the lowest set bit
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        # a[index] += value
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        # a[index] = value
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        # sum of a[0:right] (right exclusive)
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        # sum of a[left:right]
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        # binary lifting: largest index i such that a[0] + ... + a[i] <= value
        # (returns -1 if a[0] alone already exceeds value)
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
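# Minimal usage sketch for the class above:
#
#   bit = BinaryIndexedTree(size=8)
#   bit.add(3, 5)                 # a[3] += 5
#   bit.add(5, 2)                 # a[5] += 2
#   assert bit.prefix(6) == 7     # a[0] + ... + a[5]
#   assert bit.query(4, 6) == 2   # a[4] + a[5]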
| 703 |
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: int) -> dict:
    url = f'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    # ids of the current top stories, truncated to max_stories
    url = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    # each story dict is expected to provide 'title' and 'url' keys
    stories = hackernews_top_stories(max_stories)
    return '\n'.join('* [{title}]({url})'.format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 4 | 0 |
'''simple docstring'''
from math import ceil
def solution(n: int = 1001) -> int:
    # Project Euler 28: sum of the numbers on both diagonals of an n x n
    # number spiral. Ring i (i >= 1) has side length s = 2*i + 1 and corners
    # s**2, s**2 - (s - 1), s**2 - 2*(s - 1), s**2 - 3*(s - 1), which sum to
    # 4*s**2 - 6*(s - 1) = 4*odd**2 - 6*even with odd = 2*i + 1, even = 2*i.
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
_A : List[Any] =int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
| 704 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Dict =logging.get_logger(__name__)
_A : Dict ={
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase__ ( PretrainedConfig ):
'''simple docstring'''
A_ = """megatron-bert"""
def __init__( self : int , UpperCamelCase_ : int=2_9056 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[Any]=24 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=4096 , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : int=512 , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : Any=1E-12 , UpperCamelCase_ : Tuple=0 , UpperCamelCase_ : Optional[int]="absolute" , UpperCamelCase_ : Optional[Any]=True , **UpperCamelCase_ : Any , ) -> List[Any]:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Dict = vocab_size
_lowercase : Any = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Dict = num_attention_heads
_lowercase : Dict = hidden_act
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[int] = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : str = type_vocab_size
_lowercase : Optional[Any] = initializer_range
_lowercase : List[str] = layer_norm_eps
_lowercase : List[Any] = position_embedding_type
_lowercase : Optional[Any] = use_cache
| 4 | 0 |
'''simple docstring'''
import os
def __UpperCamelCase ( _lowercase = "input.txt" ) -> Optional[Any]:
with open(os.path.join(os.path.dirname(_lowercase ), _lowercase ) ) as input_file:
_lowercase : List[str] = [
[int(_lowercase ) for element in line.split(',' )]
for line in input_file.readlines()
]
_lowercase : Optional[Any] = len(_lowercase )
_lowercase : Optional[int] = len(matrix[0] )
_lowercase : Optional[int] = [[-1 for _ in range(_lowercase )] for _ in range(_lowercase )]
for i in range(_lowercase ):
_lowercase : Any = matrix[i][0]
for j in range(1, _lowercase ):
for i in range(_lowercase ):
_lowercase : Optional[Any] = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1, _lowercase ):
_lowercase : Optional[Any] = min(
minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2, -1, -1 ):
_lowercase : Union[str, Any] = min(
minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
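# Hedged pointer: the 5 x 5 example in the Project Euler 82 statement (first
# row 131, 673, 234, 103, 18) has a minimal path sum of 994; saving that matrix
# as input.txt is a quick way to sanity-check the three relaxation passes above.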
if __name__ == "__main__":
print(F'''{solution() = }''')
| 705 |
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip('/')
    target_model_path = args.target_model_path
    print(f'Load fine-pruned model from {model_name_or_path}')
    model = torch.load(os.path.join(model_name_or_path, 'pytorch_model.bin'))
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f'Copied layer {name}')
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f'Copied layer {name}')
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f'Copied layer {name}')
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}')
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'{prefix_}mask_scores']
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}')
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'{prefix_}mask_scores']
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}')
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'{prefix_}mask_scores']
                l, r = -0.1, 1.1
                # hard-concrete "stretch and clamp": map sigmoid scores into [l, r], clip to [0, 1]
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}')
            else:
                raise ValueError('Unknown pruning method')
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f'bertarized_{os.path.basename(model_name_or_path)}')
    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f'\nCreated folder {target_model_path}')
    torch.save(pruned_model, os.path.join(target_model_path, 'pytorch_model.bin'))
    print('\nPruned model saved! See you later!')
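# Example invocation (hedged: the paths are placeholders; the flags mirror the
# argparse definitions below):
#   python bertarize.py --pruning_method topK --threshold 0.10 \
#       --model_name_or_path ./serialization_dir/fine_pruned_model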
if __name__ == "__main__":
_A : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
_A : List[Any] =parser.parse_args()
main(args)
| 4 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ] )
    @property
    def dummy_uncond_unet(self):
        unet = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test', subfolder='test_unet')
        return unet
    @property
    def dummy_cond_unet(self):
        unet = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test', subfolder='test_unet_class_cond')
        return unet
    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet
        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs
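    # Note (hedged): `timesteps=[22, 0]` exercises the multistep consistency
    # sampler; the tests below that instead set `num_inference_steps=1` and
    # `timesteps=None` exercise single-step generation.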
    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)
        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }
        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents
        return inputs
    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents
    def test_consistency_model_cd_multistep(self):
        unet = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
    def test_consistency_model_cd_onestep(self):
        unet = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
    @require_torch_a
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
    @require_torch_a
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
| 706 |
'''simple docstring'''
_A : Optional[Any] ='''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)
    binary_stream = ''.join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b'=' * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b''
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6) ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            'argument should be a bytes-like object or ASCII string, '
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode('utf-8')
        except UnicodeDecodeError:
            raise ValueError('base64 encoded data should only contain ASCII characters')
    padding = encoded_data.count('=')
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = ''.join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = ''.join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data )
    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(data)
if __name__ == "__main__":
import doctest
doctest.testmod()
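    # Hedged round-trip sanity check mirroring the standard library's base64:
    assert base64_encode(b'Hello World!') == b'SGVsbG8gV29ybGQh'
    assert base64_decode('SGVsbG8gV29ybGQh') == b'Hello World!'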
| 4 | 0 |
'''simple docstring'''
def solution(n: int = 6008_5147_5143) -> int:
    """Returns the largest prime factor of n (Project Euler 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.')
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.')
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)
if __name__ == "__main__":
print(F'''{solution() = }''')
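    # Hedged sanity check from the Project Euler 3 statement: the prime
    # factors of 13195 are 5, 7, 13 and 29, so the largest is 29.
    assert solution(13195) == 29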
| 707 |
'''simple docstring'''
def is_palindrome(n) -> bool:
    return str(n) == str(n)[::-1]
def sum_reverse(n) -> int:
    return int(n) + int(str(n)[::-1])
def solution(limit: int = 1_0000) -> int:
    """Count Lychrel candidates below `limit`: numbers that do not produce a
    palindrome within 50 reverse-and-add iterations (Project Euler 55)."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
print(F'''{solution() = }''')
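    # Hedged worked example: 47 resolves in one iteration (47 + 74 = 121, a
    # palindrome), while 196 is the classic candidate that never seems to.
    assert is_palindrome(sum_reverse(47))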
| 4 | 0 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
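# Minimal sketch of what `kfold.split` yields below (hedged; `labels` is a
# stand-in for the MRPC label column used in the real call):
#
#     kfold = StratifiedKFold(n_splits=3)
#     for train_idxs, valid_idxs in kfold.split(np.zeros(len(labels)), labels):
#         ...  # per-fold train/validation index arrays, class ratios preserved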
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = DatasetDict(
        {
            'train': dataset['train'].select(train_idxs),
            'validation': dataset['train'].select(valid_idxs),
            'test': dataset['validation'],
        } )
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt', )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE )
    test_dataloader = DataLoader(
        tokenized_datasets['test'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    test_predictions = []
    # Download the dataset
    datasets = load_dataset('glue', 'mrpc')
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed)
    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets['train'].num_rows), datasets['train']['label'])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator, datasets, train_idxs, valid_idxs, batch_size, )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
                metric.add_batch(
                    predictions=predictions, references=references, )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'epoch {epoch}:', eval_metric)
        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print('Average test metrics from all folds:', test_metric)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.', )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    # New Code #
    parser.add_argument('--num_folds', type=int, default=3, help='The number of splits to perform across the dataset')
    args = parser.parse_args()
    config = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 708 |
'''simple docstring'''
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f'{file}_{class_name}_{test_name}'
    done_test[_id] += 1
    with open(file, 'r') as f:
        lines = f.readlines()
    class_regex = f'class {class_name}('
    test_regex = f'{4 * " "}def {test_name}('
    line_begin_regex = f'{8 * " "}{correct_line.split()[0]}'
    another_line_begin_regex = f'{16 * " "}{correct_line.split()[0]}'
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(f'{spaces * " "}{correct_line}')
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)
    with open(file, 'w') as f:
        for line in new_lines:
            f.write(line)
def main(correct, fail=None):
    if fail is not None:
        with open(fail, 'r') as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct, 'r') as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(';')
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
_A : str =argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
_A : Union[str, Any] =parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 4 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaImgaImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention_forwardGenerator_pass = False
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def time_input_dim(self):
        return 32
    @property
    def block_out_channels_a(self):
        return self.time_input_dim
    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((256, 256))
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
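    # Note (hedged): `strength=0.2` above keeps the init image largely intact;
    # img2img starts denoising from an intermediate timestep rather than pure
    # noise, so higher strength values deviate further from the input image.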
    def test_kandinsky_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyVaaImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_img2img_frog.npy' )
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        prompt = "A red cartoon frog, 4k"
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.float16 )
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaImgaImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-decoder', torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='cpu').manual_seed(0)
        image_embeds, negative_image_embeds = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='', ).to_tuple()
        output = pipeline(
            image=init_image, image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type='np', )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 709 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_A : Optional[int] =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, 'vision')
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == 'tf'
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs['hypothesis_template']
        return preprocess_params, {}, {}
    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop('candidate_labels')
        text_inputs = model_inputs.pop('text_inputs')
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop('candidate_labels')
        logits = model_outputs['logits'][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''')
        result = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
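# Hedged usage sketch (the checkpoint name is an assumption; any CLIP-style
# zero-shot image classification checkpoint works):
#     from transformers import pipeline
#     classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     classifier("cat.png", candidate_labels=["cat", "dog"])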
| 4 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_A : str =logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight'''))
        rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias'''))
        rename_keys.append(
            (f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight'''))
        rename_keys.append(
            (f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias'''))
        rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight'''))
        rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias'''))
        rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight'''))
        rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias'''))
        rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight'''))
        rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias'''))
    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f'''{prefix}cls_token''', 'beit.embeddings.cls_token'),
            (f'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'),
            (f'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'),
            (f'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'),
        ] )
    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ('mask_token', 'beit.embeddings.mask_token'),
                ('norm.weight', 'layernorm.weight'),
                ('norm.bias', 'layernorm.bias'),
            ] )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ('fc_norm.weight', 'beit.pooler.layernorm.weight'),
                ('fc_norm.bias', 'beit.pooler.layernorm.bias'),
                ('head.weight', 'classifier.weight'),
                ('head.bias', 'classifier.bias'),
            ] )
    return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''')
        q_bias = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''')
        v_bias = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''')
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.query.bias'''] = q_bias
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.value.bias'''] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''')
        gamma_2 = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''')
        state_dict[f'''beit.encoder.layer.{i}.lambda_1'''] = gamma_1
        state_dict[f'''beit.encoder.layer.{i}.lambda_2'''] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    has_lm_head = False if 'rvlcdip' in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = 'huggingface/label-files'
        filename = 'rvlcdip-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['model']
    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False)
    image = prepare_img()
    encoding = image_processor(images=image, return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values)
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        if has_lm_head:
            model_name = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
        else:
            model_name = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add image processor', use_temp_dir=True, )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add model', use_temp_dir=True, )
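# Example invocation (hedged: the script file name and output folder are
# assumptions; the checkpoint URL is the argparse default below):
#   python convert_dit_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base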
if __name__ == "__main__":
_A : List[str] =argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
_A : str =parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 710 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(' ' + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # print entropy
    print(f'''{round(-1 * my_fir_sum):.1f}''')
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(f'''{round(-1 * my_sec_sum):.1f}''')
    # print the difference between them
    print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}''')
def analyze_text(text: str) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
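# Hedged sanity check: for the two-character text 'ab' the counters above hold
# one of each single character plus the pairs ' a' and 'ab'.
assert analyze_text('ab') == (Counter({'a': 1, 'b': 1}), Counter({' a': 1, 'ab': 1}))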
def main() -> None:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 4 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A : Optional[int] = logging.get_logger(__name__)
CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"
    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
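# Hedged usage sketch: with the dynamic axes above, an ONNX export accepts any
# batch size / sequence length:
#     onnx_config = ConvBertOnnxConfig(ConvBertConfig(num_hidden_layers=2))
#     list(onnx_config.inputs)  # -> ['input_ids', 'attention_mask', 'token_type_ids']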
| 711 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base')
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
        text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        output = model(input_ids)['last_hidden_state']
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))
| 4 | 0 |
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules['data_utils'] = data_utils
sys.modules['vocabulary'] = data_utils
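# Registering this module in sys.modules under the old top-level names lets
# pickle.load resolve classes that the corpus file stored as e.g. "data_utils.Vocab",
# since unpickling looks types up by module path at load time.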
def convert_transfo_xl_checkpoint_to_pytorch( tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file ) -> Union[str, Any]:
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, 'rb' ) as fp:
            _lowercase : Optional[int] = pickle.load(fp, encoding='latin1' )
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        _lowercase : Tuple = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f'''Save vocabulary to {pytorch_vocab_dump_path}''' )
        _lowercase : Union[str, Any] = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path )
        _lowercase : str = corpus.__dict__
        corpus_dict_no_vocab.pop('vocab', None )
        _lowercase : str = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f'''Save dataset to {pytorch_dataset_dump_path}''' )
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path )
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        _lowercase : Tuple = os.path.abspath(tf_checkpoint_path )
        _lowercase : int = os.path.abspath(transfo_xl_config_file )
        print(f'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' )
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            _lowercase : Dict = TransfoXLConfig()
        else:
            _lowercase : List[Any] = TransfoXLConfig.from_json_file(transfo_xl_config_file )
        print(f'''Building PyTorch model from configuration: {config}''' )
        _lowercase : str = TransfoXLLMHeadModel(config )
        _lowercase : Any = load_tf_weights_in_transfo_xl(model, config, tf_path )
        # Save pytorch-model
        _lowercase : Dict = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME )
        _lowercase : int = os.path.join(pytorch_dump_folder_path, CONFIG_NAME )
        print(f'''Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}''' )
        torch.save(model.state_dict(), pytorch_weights_dump_path )
        print(f'''Save configuration file to {os.path.abspath(pytorch_config_dump_path )}''' )
        with open(pytorch_config_dump_path, 'w', encoding='utf-8' ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
_A : Dict =argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
_A : Tuple =parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
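# Example invocation (illustrative; the script name and all paths are placeholders):
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl-pt \
#       --tf_checkpoint_path ./model.ckpt \
#       --transfo_xl_config_file ./config.json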
| 712 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_A : int =logging.get_logger(__name__)
_A : Union[str, Any] ={
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class InstructBlipVisionConfig ( PretrainedConfig ):
'''simple docstring'''
A_ = """instructblip_vision_model"""
def __init__( self : Union[str, Any] , UpperCamelCase_ : str=1408 , UpperCamelCase_ : Tuple=6144 , UpperCamelCase_ : Union[str, Any]=39 , UpperCamelCase_ : Optional[Any]=16 , UpperCamelCase_ : str=224 , UpperCamelCase_ : Dict=14 , UpperCamelCase_ : Dict="gelu" , UpperCamelCase_ : int=1E-6 , UpperCamelCase_ : int=0.0 , UpperCamelCase_ : List[str]=1E-10 , UpperCamelCase_ : str=True , **UpperCamelCase_ : Dict , ) -> Any:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
_lowercase : Optional[Any] = hidden_size
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[int] = num_hidden_layers
_lowercase : str = num_attention_heads
_lowercase : Tuple = patch_size
_lowercase : Dict = image_size
_lowercase : Optional[int] = initializer_range
_lowercase : List[Any] = attention_dropout
_lowercase : int = layer_norm_eps
_lowercase : Optional[int] = hidden_act
_lowercase : str = qkv_bias
@classmethod
def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase_ )
_lowercase , _lowercase : Tuple = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
_lowercase : Any = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class InstructBlipQFormerConfig ( PretrainedConfig ):
'''simple docstring'''
A_ = """instructblip_qformer"""
def __init__( self : Tuple , UpperCamelCase_ : Union[str, Any]=3_0522 , UpperCamelCase_ : Union[str, Any]=768 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Optional[Any]=12 , UpperCamelCase_ : List[str]=3072 , UpperCamelCase_ : List[str]="gelu" , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : List[Any]=1E-12 , UpperCamelCase_ : Optional[Any]=0 , UpperCamelCase_ : str="absolute" , UpperCamelCase_ : List[Any]=2 , UpperCamelCase_ : Any=1408 , **UpperCamelCase_ : Dict , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Dict = vocab_size
_lowercase : Optional[Any] = hidden_size
_lowercase : Any = num_hidden_layers
_lowercase : List[Any] = num_attention_heads
_lowercase : Optional[int] = hidden_act
_lowercase : Union[str, Any] = intermediate_size
_lowercase : List[Any] = hidden_dropout_prob
_lowercase : Dict = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : Optional[int] = initializer_range
_lowercase : Tuple = layer_norm_eps
_lowercase : List[str] = position_embedding_type
_lowercase : str = cross_attention_frequency
_lowercase : int = encoder_hidden_size
@classmethod
def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase_ )
_lowercase , _lowercase : List[str] = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
_lowercase : Optional[int] = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class InstructBlipConfig ( PretrainedConfig ):
'''simple docstring'''
A_ = """instructblip"""
A_ = True
def __init__( self : Any , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=32 , **UpperCamelCase_ : int ) -> List[str]:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
if vision_config is None:
_lowercase : Any = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
_lowercase : List[Any] = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
_lowercase : List[Any] = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
_lowercase : List[Any] = InstructBlipVisionConfig(**UpperCamelCase_ )
_lowercase : Union[str, Any] = InstructBlipQFormerConfig(**UpperCamelCase_ )
_lowercase : Union[str, Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
_lowercase : int = CONFIG_MAPPING[text_model_type](**UpperCamelCase_ )
_lowercase : str = self.text_config.tie_word_embeddings
_lowercase : int = self.text_config.is_encoder_decoder
_lowercase : Tuple = num_query_tokens
_lowercase : str = self.vision_config.hidden_size
_lowercase : Union[str, Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowercase : List[Any] = 1.0
_lowercase : int = 0.02
@classmethod
def __UpperCAmelCase ( cls : Tuple , UpperCamelCase_ : InstructBlipVisionConfig , UpperCamelCase_ : InstructBlipQFormerConfig , UpperCamelCase_ : PretrainedConfig , **UpperCamelCase_ : Dict , ) -> List[str]:
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCamelCase_ , )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
'''simple docstring'''
_lowercase : List[Any] = copy.deepcopy(self.__dict__ )
_lowercase : Optional[int] = self.vision_config.to_dict()
_lowercase : Optional[Any] = self.qformer_config.to_dict()
_lowercase : Tuple = self.text_config.to_dict()
_lowercase : Dict = self.__class__.model_type
return output
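# Hedged composition sketch (the three config classes defined above, with defaults
# used purely for illustration):
#   vision = InstructBlipVisionConfig()
#   qformer = InstructBlipQFormerConfig()
#   text = CONFIG_MAPPING['opt']()
#   config = InstructBlipConfig(
#       vision_config=vision.to_dict(), qformer_config=qformer.to_dict(), text_config=text.to_dict()
#   )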
| 4 | 0 |
'''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field( default=None, metadata=None ):
    return field(default_factory=lambda: default, metadata=metadata )
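# Note: routing the list default through `default_factory` is what makes a mutable
# default legal on the dataclass below; a bare `default=[...]` would raise
# "mutable default ... is not allowed" at class-creation time.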
@dataclass
class PlotArguments :
'''simple docstring'''
A_ = field(
metadata={"""help""": """The csv file to plot."""} , )
A_ = field(
default=__lowercase , metadata={"""help""": """Whether to plot along batch size or sequence length. Defaults to sequence length."""} , )
A_ = field(
default=__lowercase , metadata={"""help""": """Whether the csv file has time results or memory results. Defaults to memory results."""} , )
A_ = field(
default=__lowercase , metadata={"""help""": """Disable logarithmic scale when plotting"""} , )
A_ = field(
default=__lowercase , metadata={
"""help""": """Whether the csv file has training results or inference results. Defaults to inference results."""
} , )
A_ = field(
default=__lowercase , metadata={"""help""": """Filename under which the plot will be saved. If unused no plot is saved."""} , )
A_ = list_field(
default=__lowercase , metadata={"""help""": """List of model names that are used instead of the ones in the csv file."""} )
def can_convert_to_int( value ) -> bool:
    try:
        int(value )
        return True
    except ValueError:
        return False
def can_convert_to_float( value ) -> bool:
    try:
        float(value )
        return True
    except ValueError:
        return False
class Plot :
'''simple docstring'''
def __init__( self : str , UpperCamelCase_ : Any ) -> List[Any]:
'''simple docstring'''
_lowercase : Any = args
_lowercase : Tuple = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='' ) as csv_file:
            _lowercase : int = csv.DictReader(csv_file )
for row in reader:
_lowercase : Tuple = row['model']
self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) )
self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) )
if can_convert_to_int(row['result'] ):
# value is not None
_lowercase : Tuple = int(row['result'] )
elif can_convert_to_float(row['result'] ):
# value is not None
_lowercase : Optional[Any] = float(row['result'] )
def __UpperCAmelCase ( self : str ) -> Dict:
'''simple docstring'''
_lowercase , _lowercase : int = plt.subplots()
_lowercase : Optional[Any] = 'Time usage' if self.args.is_time else 'Memory usage'
_lowercase : Any = title_str + ' for training' if self.args.is_train else title_str + ' for inference'
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('log' )
ax.set_yscale('log' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
_lowercase : str = sorted(set(self.result_dict[model_name]['bsz'] ) )
_lowercase : Optional[Any] = sorted(set(self.result_dict[model_name]['seq_len'] ) )
_lowercase : Tuple = self.result_dict[model_name]['result']
((_lowercase) , (_lowercase)) : Optional[int] = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
_lowercase : Dict = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
_lowercase : Any = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=int , )
else:
_lowercase : Tuple = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((_lowercase) , (_lowercase)) : Optional[int] = (
('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
)
                _lowercase : Optional[int] = np.asarray(x_axis_array , int )[: len(y_axis_array )]
                plt.scatter(
                    x_axis_array , y_axis_array , label=F'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
                plt.plot(x_axis_array , y_axis_array , '--' )
title_str += F''' {label_model_name} vs.'''
_lowercase : str = title_str[:-4]
_lowercase : Dict = 'Time in s' if self.args.is_time else 'Memory in MB'
# plot
        plt.title(title_str )
        plt.xlabel(x_axis_label )
        plt.ylabel(y_axis_label )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def main( ) -> Any:
    _lowercase : Dict = HfArgumentParser(PlotArguments )
    _lowercase : Any = parser.parse_args_into_dataclasses()[0]
    _lowercase : str = Plot(args=args )
    plot.plot()
if __name__ == "__main__":
main()
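# Example invocation (illustrative; the csv is expected to carry the columns read
# above -- model, batch_size, sequence_length, result):
#   python plot_csv_file.py --csv_file inference_memory.csv --figure_png_file memory.png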
| 713 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_A : List[str] ='''pt'''
elif is_tf_available():
_A : Tuple ='''tf'''
else:
_A : Optional[int] ='''jax'''
class ByTaTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
A_ = ByTaTokenizer
A_ = False
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
_lowercase : Any = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def __UpperCAmelCase ( self : int , **UpperCamelCase_ : List[Any] ) -> ByTaTokenizer:
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Tuple=20 , UpperCamelCase_ : Optional[int]=5 ) -> Tuple[str, list]:
'''simple docstring'''
_lowercase : Dict = []
for i in range(len(UpperCamelCase_ ) ):
try:
_lowercase : List[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_lowercase : Optional[Any] = list(filter(lambda UpperCamelCase_ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , UpperCamelCase_ ) )
_lowercase : List[Any] = list(filter(lambda UpperCamelCase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase_ ) , UpperCamelCase_ ) )
if max_length is not None and len(UpperCamelCase_ ) > max_length:
_lowercase : List[Any] = toks[:max_length]
if min_length is not None and len(UpperCamelCase_ ) < min_length and len(UpperCamelCase_ ) > 0:
while len(UpperCamelCase_ ) < min_length:
_lowercase : Tuple = toks + toks
# toks_str = [t[1] for t in toks]
_lowercase : Dict = [t[0] for t in toks]
# Ensure consistency
_lowercase : Any = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
if " " not in output_txt and len(UpperCamelCase_ ) > 1:
_lowercase : Any = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase_ )
)
if with_prefix_space:
_lowercase : Union[str, Any] = ' ' + output_txt
_lowercase : int = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
return output_txt, output_ids
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
_lowercase : List[str] = self.ta_base_tokenizer
_lowercase : Union[str, Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
_lowercase : Tuple = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Optional[int] = self.ta_base_tokenizer
_lowercase : Tuple = 'Unicode €.'
_lowercase : List[Any] = tokenizer(UpperCamelCase_ )
_lowercase : List[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
# decoding
_lowercase : List[str] = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , 'Unicode €.</s>' )
_lowercase : Any = tokenizer('e è é ê ë' )
_lowercase : Optional[int] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
# decoding
_lowercase : Tuple = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
'''simple docstring'''
_lowercase : List[Any] = self.ta_base_tokenizer
_lowercase : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_lowercase : Any = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_lowercase : Dict = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
if FRAMEWORK != "jax":
_lowercase : Optional[Any] = list(batch.input_ids.numpy()[0] )
else:
_lowercase : List[str] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def __UpperCAmelCase ( self : Optional[int] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = self.ta_base_tokenizer
_lowercase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowercase : str = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , UpperCamelCase_ )
self.assertIn('attention_mask' , UpperCamelCase_ )
self.assertNotIn('decoder_input_ids' , UpperCamelCase_ )
self.assertNotIn('decoder_attention_mask' , UpperCamelCase_ )
def __UpperCAmelCase ( self : Any ) -> int:
'''simple docstring'''
_lowercase : Tuple = self.ta_base_tokenizer
_lowercase : Optional[Any] = [
'Summary of the text.',
'Another summary.',
]
_lowercase : str = tokenizer(
text_target=UpperCamelCase_ , max_length=32 , padding='max_length' , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def __UpperCAmelCase ( self : Dict ) -> Tuple:
'''simple docstring'''
_lowercase : str = self.ta_base_tokenizer
_lowercase : str = ['A long paragraph for summarization. </s>']
_lowercase : Optional[int] = ['Summary of the text. </s>']
# fmt: off
_lowercase : Optional[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_lowercase : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_lowercase : Any = tokenizer(UpperCamelCase_ , text_target=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , batch['input_ids'][0] )
self.assertEqual(UpperCamelCase_ , batch['labels'][0] )
def __UpperCAmelCase ( self : List[str] ) -> int:
'''simple docstring'''
_lowercase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_lowercase : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : List[Any] = tempfile.mkdtemp()
_lowercase : Any = ' He is very happy, UNwant\u00E9d,running'
_lowercase : Union[str, Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowercase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
_lowercase : Tuple = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
shutil.rmtree(UpperCamelCase_ )
_lowercase : str = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : Dict = tempfile.mkdtemp()
_lowercase : List[Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_lowercase : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_lowercase : Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowercase : List[str] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
_lowercase : Dict = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_lowercase : Dict = tokenizer.__class__.from_pretrained(UpperCamelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCamelCase_ )
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
'''simple docstring'''
_lowercase : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_lowercase : int = json.load(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_lowercase : Tuple = json.load(UpperCamelCase_ )
_lowercase : List[Any] = [F'''<extra_id_{i}>''' for i in range(125 )]
_lowercase : Any = added_tokens_extra_ids + [
'an_additional_special_token'
]
_lowercase : int = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_lowercase : Optional[Any] = tokenizer_class.from_pretrained(
UpperCamelCase_ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_lowercase : List[str] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=UpperCamelCase_ )]
_lowercase : Tuple = tokenizer_class.from_pretrained(
UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def __UpperCAmelCase ( self : List[str] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_ )
_lowercase : str = tokenizer_class.from_pretrained(UpperCamelCase_ )
self.assertTrue(tokenizer.decode([255] ) == '' )
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : str ) -> Tuple:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = self.get_tokenizers(fast=UpperCamelCase_ , do_lower_case=UpperCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowercase : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
_lowercase : Tuple = tokenizer.convert_tokens_to_string(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
_lowercase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowercase : Optional[int] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
_lowercase : Optional[int] = 0
_lowercase : int = tokenizer.convert_ids_to_tokens(
UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
for attr in attributes_list:
setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [] )
setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
| 4 | 0 |
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_A : List[Any] =1_6
_A : Tuple =3_2
def bamb( x ) -> int:
return int(x / 2**20 )
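# Illustrative check: bamb(3 * 2**20) == 3, i.e. a raw byte count is reported in whole MiB.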
class TorchTracemalloc :
'''simple docstring'''
def __enter__( self : int ) -> Tuple:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
_lowercase : Dict = torch.cuda.memory_allocated()
return self
def __exit__( self : int , *UpperCamelCase_ : Any ) -> List[str]:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
_lowercase : Union[str, Any] = torch.cuda.memory_allocated()
_lowercase : Dict = torch.cuda.max_memory_allocated()
_lowercase : List[Any] = bamb(self.end - self.begin )
_lowercase : Optional[Any] = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders( accelerator, batch_size = 16, model_name = "bert-base-cased", n_train = 320, n_val = 160, ) -> Optional[Any]:
_lowercase : Optional[int] = AutoTokenizer.from_pretrained(UpperCamelCase__ )
_lowercase : Dict = load_dataset(
'glue', 'mrpc', split={'train': f'''train[:{n_train}]''', 'validation': f'''validation[:{n_val}]'''} )
def tokenize_function(_lowercase ):
# max_length=None => use the model max length (it's actually the default)
_lowercase : List[str] = tokenizer(examples['sentence1'], examples['sentence2'], truncation=UpperCamelCase__, max_length=UpperCamelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_lowercase : int = datasets.map(
UpperCamelCase__, batched=UpperCamelCase__, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=UpperCamelCase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_lowercase : Any = tokenized_datasets.rename_column('label', 'labels' )
def collate_fn(_lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCamelCase__, padding='max_length', max_length=128, return_tensors='pt' )
return tokenizer.pad(UpperCamelCase__, padding='longest', return_tensors='pt' )
# Instantiate dataloaders.
_lowercase : str = DataLoader(
tokenized_datasets['train'], shuffle=UpperCamelCase__, collate_fn=UpperCamelCase__, batch_size=UpperCamelCase__ )
_lowercase : Optional[Any] = DataLoader(
tokenized_datasets['validation'], shuffle=UpperCamelCase__, collate_fn=UpperCamelCase__, batch_size=UpperCamelCase__ )
return train_dataloader, eval_dataloader
def training_function( config, args ) -> Optional[int]:
# Initialize accelerator
_lowercase : Optional[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowercase : str = config['lr']
_lowercase : List[str] = int(config['num_epochs'] )
_lowercase : Union[str, Any] = int(config['seed'] )
_lowercase : List[str] = int(config['batch_size'] )
_lowercase : Union[str, Any] = args.model_name_or_path
set_seed(UpperCamelCase__ )
_lowercase , _lowercase : List[Any] = get_dataloaders(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, args.n_train, args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowercase : List[Any] = AutoModelForSequenceClassification.from_pretrained(UpperCamelCase__, return_dict=UpperCamelCase__ )
# Instantiate optimizer
_lowercase : List[str] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_lowercase : List[str] = optimizer_cls(params=model.parameters(), lr=UpperCamelCase__ )
if accelerator.state.deepspeed_plugin is not None:
_lowercase : Optional[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
_lowercase : Any = 1
_lowercase : Tuple = (len(UpperCamelCase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_lowercase : int = get_linear_schedule_with_warmup(
optimizer=UpperCamelCase__, num_warmup_steps=0, num_training_steps=UpperCamelCase__, )
else:
_lowercase : Optional[Any] = DummyScheduler(UpperCamelCase__, total_num_steps=UpperCamelCase__, warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : int = accelerator.prepare(
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
# We need to keep track of how many total steps we have iterated over
_lowercase : Optional[Any] = 0
# We also need to keep track of the stating epoch so files are named properly
_lowercase : Optional[Any] = 0
# Now we train the model
_lowercase : Optional[Any] = {}
for epoch in range(UpperCamelCase__, UpperCamelCase__ ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(UpperCamelCase__ ):
_lowercase : Optional[Any] = model(**UpperCamelCase__ )
_lowercase : List[str] = outputs.loss
_lowercase : str = loss / gradient_accumulation_steps
accelerator.backward(UpperCamelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) )
accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
accelerator.print(
'Total Peak Memory consumed during the train (max): {}'.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
_lowercase : Tuple = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f'''epoch-{epoch}'''] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir, 'peak_memory_utilization.json' ), 'w' ) as f:
json.dump(UpperCamelCase__, UpperCamelCase__ )
def main( ) -> List[str]:
_lowercase : List[str] = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path', type=UpperCamelCase__, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=UpperCamelCase__, )
parser.add_argument(
'--output_dir', type=UpperCamelCase__, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
parser.add_argument(
'--peak_memory_upper_bound', type=UpperCamelCase__, default=UpperCamelCase__, help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.', )
parser.add_argument(
'--n_train', type=UpperCamelCase__, default=320, help='Number of training examples to use.', )
parser.add_argument(
'--n_val', type=UpperCamelCase__, default=160, help='Number of validation examples to use.', )
parser.add_argument(
'--num_epochs', type=UpperCamelCase__, default=1, help='Number of train epochs.', )
_lowercase : List[Any] = parser.parse_args()
_lowercase : int = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(UpperCamelCase__, UpperCamelCase__ )
if __name__ == "__main__":
main()
| 714 |
'''simple docstring'''
_A : Dict ='''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_A : Dict =[{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_A : Dict ={
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 4 | 0 |
'''simple docstring'''
def molarity_to_normality( nfactor : int , moles : float , volume : float ) -> float:
    return round(float(moles / volume ) * nfactor )
def moles_to_pressure( volume : float , moles : float , temperature : float ) -> float:
    return round(float((moles * 0.0_8_2_1 * temperature) / (volume) ) )
def moles_to_volume( pressure : float , moles : float , temperature : float ) -> float:
    return round(float((moles * 0.0_8_2_1 * temperature) / (pressure) ) )
def pressure_and_volume_to_temperature( pressure : float , moles : float , volume : float ) -> float:
    return round(float((pressure * volume) / (0.0_8_2_1 * moles) ) )
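# Worked example (illustrative): 2 mol of an ideal gas at 300 K in a 10 L vessel:
#   moles_to_pressure(volume=10, moles=2, temperature=300)
#   == round((2 * 0.0821 * 300) / 10) == round(4.926) == 5  (atm)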
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy( x ) -> Tuple:
_lowercase : int = torch.exp(_lowercase )
_lowercase : List[str] = torch.sum(_lowercase, dim=1 ) # sum of exp(x_i)
_lowercase : str = torch.sum(x * exp_x, dim=1 ) # sum of x_i * exp(x_i)
return torch.log(_lowercase ) - B / A
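# Sanity check (illustrative): for uniform two-class logits x = torch.zeros(1, 2),
# exp(x) sums to A = 2 and B = sum(x * exp(x)) = 0, so the function returns
# log(2) - 0 ~= 0.6931 -- the entropy of a uniform softmax distribution.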
class DeeBertEncoder ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase_ : List[str] ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
_lowercase : int = config.output_attentions
_lowercase : int = config.output_hidden_states
_lowercase : Union[str, Any] = nn.ModuleList([BertLayer(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] )
_lowercase : List[Any] = nn.ModuleList([BertHighway(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] )
_lowercase : Tuple = [-1 for _ in range(config.num_hidden_layers )]
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : str ) -> int:
'''simple docstring'''
if (type(UpperCamelCase_ ) is float) or (type(UpperCamelCase_ ) is int):
for i in range(len(self.early_exit_entropy ) ):
_lowercase : Optional[Any] = x
else:
_lowercase : Optional[int] = x
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : List[Any] ) -> Dict:
'''simple docstring'''
_lowercase : Optional[int] = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def __UpperCAmelCase ( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[Any]=None , ) -> Optional[int]:
'''simple docstring'''
_lowercase : int = ()
_lowercase : List[Any] = ()
_lowercase : Tuple = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_lowercase : Optional[int] = all_hidden_states + (hidden_states,)
_lowercase : str = layer_module(
UpperCamelCase_ , UpperCamelCase_ , head_mask[i] , UpperCamelCase_ , UpperCamelCase_ )
_lowercase : List[str] = layer_outputs[0]
if self.output_attentions:
_lowercase : Tuple = all_attentions + (layer_outputs[1],)
_lowercase : Optional[int] = (hidden_states,)
if self.output_hidden_states:
_lowercase : str = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowercase : Optional[int] = current_outputs + (all_attentions,)
_lowercase : List[Any] = self.highway[i](UpperCamelCase_ )
# logits, pooled_output
if not self.training:
_lowercase : Dict = highway_exit[0]
_lowercase : Tuple = entropy(UpperCamelCase_ )
_lowercase : Dict = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowercase : str = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
_lowercase : Tuple = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(UpperCamelCase_ , i + 1 )
else:
_lowercase : Optional[int] = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowercase : str = all_hidden_states + (hidden_states,)
_lowercase : Optional[Any] = (hidden_states,)
if self.output_hidden_states:
_lowercase : Dict = outputs + (all_hidden_states,)
if self.output_attentions:
_lowercase : Optional[Any] = outputs + (all_attentions,)
_lowercase : Optional[int] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"""The Bert Model transformer with early exiting (DeeBERT). """ , A , )
class DeeBertModel ( BertPreTrainedModel ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
super().__init__(UpperCamelCase_ )
_lowercase : int = config
_lowercase : int = BertEmbeddings(UpperCamelCase_ )
_lowercase : List[Any] = DeeBertEncoder(UpperCamelCase_ )
_lowercase : Any = BertPooler(UpperCamelCase_ )
self.init_weights()
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
return self.embeddings.word_embeddings
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict ) -> Any:
'''simple docstring'''
_lowercase : Optional[Any] = value
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> Union[str, Any]:
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(UpperCamelCase_ )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : int=None , UpperCamelCase_ : Tuple=None , ) -> Union[str, Any]:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
_lowercase : Any = input_ids.size()
elif inputs_embeds is not None:
_lowercase : Any = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
_lowercase : str = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowercase : Tuple = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
if encoder_attention_mask is None:
_lowercase : Dict = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
if token_type_ids is None:
_lowercase : int = torch.zeros(UpperCamelCase_ , dtype=torch.long , device=UpperCamelCase_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowercase : torch.Tensor = self.get_extended_attention_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowercase : int = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowercase : int = encoder_attention_mask[:, None, None, :]
_lowercase : str = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
_lowercase : Optional[int] = (1.0 - encoder_extended_attention_mask) * -1_00_00.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowercase : Optional[int] = self.get_head_mask(UpperCamelCase_ , self.config.num_hidden_layers )
_lowercase : Dict = self.embeddings(
input_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ )
_lowercase : List[Any] = self.encoder(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )
_lowercase : int = encoder_outputs[0]
_lowercase : str = self.pooler(UpperCamelCase_ )
_lowercase : List[Any] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException ( Exception ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Any = message
_lowercase : Dict = exit_layer # start from 1!
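# The exception above doubles as a control-flow channel: during inference the
# encoder raises it as soon as a layer's entropy drops below its threshold, and
# the classification head catches it (see `except HighwayException as e:` below)
# to recover the early-exit logits and the index of the exiting layer.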
class BertHighway ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : List[str] ) -> Dict:
'''simple docstring'''
super().__init__()
_lowercase : Optional[Any] = BertPooler(UpperCamelCase_ )
_lowercase : List[Any] = nn.Dropout(config.hidden_dropout_prob )
_lowercase : int = nn.Linear(config.hidden_size , config.num_labels )
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Optional[int] ) -> List[Any]:
'''simple docstring'''
_lowercase : str = encoder_outputs[0]
_lowercase : int = self.pooler(UpperCamelCase_ )
# "return" pooler_output
# BertModel
_lowercase : Optional[int] = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
_lowercase : Dict = bmodel_output[1]
_lowercase : Union[str, Any] = self.dropout(UpperCamelCase_ )
_lowercase : str = self.classifier(UpperCamelCase_ )
return logits, pooled_output
@add_start_docstrings(
"""Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """ , BERT_START_DOCSTRING , )
class DeeBertForSequenceClassification ( BertPreTrainedModel ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : List[Any] ) -> List[str]:
'''simple docstring'''
super().__init__(UpperCamelCase_ )
_lowercase : Dict = config.num_labels
_lowercase : Any = config.num_hidden_layers
_lowercase : Optional[int] = DeeBertModel(UpperCamelCase_ )
_lowercase : Any = nn.Dropout(config.hidden_dropout_prob )
_lowercase : Optional[Any] = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : str=-1 , UpperCamelCase_ : Union[str, Any]=False , ) -> Tuple:
'''simple docstring'''
_lowercase : Union[str, Any] = self.num_layers
try:
_lowercase : Tuple = self.bert(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_lowercase : List[Any] = outputs[1]
_lowercase : int = self.dropout(UpperCamelCase_ )
_lowercase : Optional[int] = self.classifier(UpperCamelCase_ )
_lowercase : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowercase : Union[str, Any] = e.message
_lowercase : Any = e.exit_layer
_lowercase : Optional[int] = outputs[0]
if not self.training:
_lowercase : Union[str, Any] = entropy(UpperCamelCase_ )
_lowercase : Tuple = []
_lowercase : Tuple = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowercase : Tuple = MSELoss()
_lowercase : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_lowercase : Union[str, Any] = CrossEntropyLoss()
_lowercase : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_lowercase : Optional[Any] = []
for highway_exit in outputs[-1]:
_lowercase : Optional[Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(UpperCamelCase_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_lowercase : Union[str, Any] = MSELoss()
_lowercase : Any = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_lowercase : Dict = CrossEntropyLoss()
_lowercase : Optional[int] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(UpperCamelCase_ )
if train_highway:
_lowercase : List[str] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_lowercase : Optional[Any] = (loss,) + outputs
if not self.training:
_lowercase : List[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowercase : Dict = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 4 | 0 |
'''simple docstring'''
from __future__ import annotations
END ='''#'''
class Trie :
'''simple docstring'''
def __init__( self : List[Any] ) -> None:
'''simple docstring'''
_lowercase : dict = {}
    def insert_word( self : Union[str, Any] , text : str ) -> None:
'''simple docstring'''
_lowercase : Optional[int] = self._trie
for char in text:
if char not in trie:
                trie[char] = {}
_lowercase : Tuple = trie[char]
        trie[END] = True
    def find_word( self : int , prefix : str ) -> tuple | list:
'''simple docstring'''
_lowercase : Optional[Any] = self._trie
for char in prefix:
if char in trie:
_lowercase : str = trie[char]
else:
return []
        return self._elements(trie )
    def _elements( self : List[str] , d : Any ) -> tuple:
'''simple docstring'''
_lowercase : Union[str, Any] = []
for c, v in d.items():
            _lowercase : List[str] = [' '] if c == END else [(c + s) for s in self._elements(v )]
            result.extend(sub_result )
        return tuple(result )
_A : Any =Trie()
_A : Tuple =('''depart''', '''detergent''', '''daring''', '''dog''', '''deer''', '''deal''')
for word in words:
trie.insert_word(word)
def autocomplete_using_trie( string ) -> tuple:
    _lowercase : int = trie.find_word(string )
return tuple(string + word for word in suffixes )
def main( ) -> None:
print(autocomplete_using_trie('de' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 716 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : int ) -> Any:
'''simple docstring'''
_lowercase : List[Any] = [10, 20, 30, 40, 50, 60]
_lowercase : Tuple = [2, 4, 6, 8, 10, 12]
_lowercase : Optional[Any] = 100
        self.assertEqual(kp.calc_profit(profit , weight , max_weight ) , 210 )
def __UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
        self.assertRaisesRegex(ValueError , 'max_weight must greater than zero.' )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
        self.assertRaisesRegex(ValueError , 'Weight can not be negative.' )
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
        self.assertRaisesRegex(ValueError , 'Profit can not be negative.' )
def __UpperCAmelCase ( self : int ) -> List[str]:
'''simple docstring'''
        self.assertRaisesRegex(ValueError , 'max_weight must greater than zero.' )
def __UpperCAmelCase ( self : int ) -> List[Any]:
'''simple docstring'''
        self.assertRaisesRegex(
            ValueError , 'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 4 | 0 |
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset( dataset, expected_features ) -> Tuple:
assert isinstance(_lowercase, _lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Tuple:
_lowercase : Optional[int] = tmp_path / 'cache'
_lowercase : Tuple = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowercase : Tuple = ParquetDatasetReader(_lowercase, cache_dir=_lowercase, keep_in_memory=_lowercase ).read()
_check_parquet_dataset(_lowercase, _lowercase )
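# Minimal sketch of the memory-assertion idea with pyarrow directly (an assumption
# for illustration, not the fixtures' actual implementation):
#   import pyarrow as pa
#   before = pa.total_allocated_bytes()
#   table = pa.table({'col_1': ['a', 'b']})   # work that allocates from Arrow's pool
#   assert pa.total_allocated_bytes() >= before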
@pytest.mark.parametrize(
'features', [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
], )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> str:
_lowercase : int = tmp_path / 'cache'
_lowercase : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_lowercase : List[Any] = features.copy() if features else default_expected_features
_lowercase : str = (
Features({feature: Value(_lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowercase : Any = ParquetDatasetReader(_lowercase, features=_lowercase, cache_dir=_lowercase ).read()
_check_parquet_dataset(_lowercase, _lowercase )
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Union[str, Any]:
_lowercase : Tuple = tmp_path / 'cache'
_lowercase : Optional[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_lowercase : List[str] = ParquetDatasetReader(_lowercase, cache_dir=_lowercase, split=_lowercase ).read()
_check_parquet_dataset(_lowercase, _lowercase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type', [str, list] )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> List[str]:
if issubclass(_lowercase, _lowercase ):
_lowercase : int = parquet_path
elif issubclass(_lowercase, _lowercase ):
_lowercase : Any = [parquet_path]
_lowercase : Optional[Any] = tmp_path / 'cache'
_lowercase : List[str] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_lowercase : Any = ParquetDatasetReader(_lowercase, cache_dir=_lowercase ).read()
_check_parquet_dataset(_lowercase, _lowercase )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase=("train",) ) -> int:
assert isinstance(_lowercase, _lowercase )
for split in splits:
_lowercase : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Union[str, Any]:
_lowercase : Union[str, Any] = tmp_path / 'cache'
_lowercase : int = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowercase : Any = ParquetDatasetReader(
{'train': parquet_path}, cache_dir=_lowercase, keep_in_memory=_lowercase ).read()
_check_parquet_datasetdict(_lowercase, _lowercase )
@pytest.mark.parametrize(
'features', [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
], )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Tuple:
_lowercase : List[str] = tmp_path / 'cache'
_lowercase : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_lowercase : str = features.copy() if features else default_expected_features
_lowercase : Dict = (
Features({feature: Value(_lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowercase : List[str] = ParquetDatasetReader({'train': parquet_path}, features=_lowercase, cache_dir=_lowercase ).read()
_check_parquet_datasetdict(_lowercase, _lowercase )
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> List[str]:
if split:
_lowercase : Tuple = {split: parquet_path}
else:
_lowercase : List[Any] = 'train'
_lowercase : List[str] = {'train': parquet_path, 'test': parquet_path}
_lowercase : Optional[Any] = tmp_path / 'cache'
_lowercase : Optional[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_lowercase : Tuple = ParquetDatasetReader(_lowercase, cache_dir=_lowercase ).read()
_check_parquet_datasetdict(_lowercase, _lowercase, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __UpperCamelCase ( _lowercase, _lowercase ) -> str:
_lowercase : Optional[int] = ParquetDatasetWriter(_lowercase, tmp_path / 'foo.parquet' )
assert writer.write() > 0
_lowercase : Any = pq.ParquetFile(tmp_path / 'foo.parquet' )
_lowercase : int = pf.read()
assert dataset.data.table == output_table
def __UpperCamelCase ( _lowercase, _lowercase ) -> List[Any]:
_lowercase : List[Any] = str(shared_datadir / 'test_image_rgb.jpg' )
_lowercase : List[Any] = {'image': [image_path]}
_lowercase : Dict = Features({'image': Image()} )
_lowercase : str = Dataset.from_dict(_lowercase, features=_lowercase )
_lowercase : int = ParquetDatasetWriter(_lowercase, tmp_path / 'foo.parquet' )
assert writer.write() > 0
_lowercase : Tuple = Dataset.from_parquet(str(tmp_path / 'foo.parquet' ) )
assert dataset.features == reloaded_dataset.features
_lowercase : List[str] = ParquetDatasetReader(str(tmp_path / 'foo.parquet' ), streaming=_lowercase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'feature, expected', [
(Features({'foo': Value('int32' )} ), None),
(Features({'image': Image(), 'foo': Value('int32' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'nested': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
], )
def __UpperCamelCase ( _lowercase, _lowercase ) -> int:
assert get_writer_batch_size(_lowercase ) == expected
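# A minimal round-trip sketch of the reader/writer pair exercised above
# (paths and values here are illustrative, not part of the test suite):
#
#   ds = Dataset.from_dict({'col_1': ['a'], 'col_2': [1], 'col_3': [1.0]})
#   ParquetDatasetWriter(ds, 'tmp/foo.parquet').write()
#   reloaded = ParquetDatasetReader('tmp/foo.parquet').read()
#   assert reloaded.column_names == ds.column_names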
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_A : Optional[Any] ={'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Tuple =['''XLNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str =['''XLNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Any =[
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str =[
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
_A : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
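# With the lazy-module pattern above, heavy submodules are only imported when
# one of their attributes is first accessed. Illustrative usage (assumes the
# package is importable and torch is installed):
#
#   from transformers import XLNetModel   # resolves through the _LazyModule
#   model = XLNetModel.from_pretrained('xlnet-base-cased')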
| 4 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
_A : str =logging.get_logger(__name__)
class lowerCamelCase__ ( __snake_case ):
'''simple docstring'''
    def __init__( self : int , *args , **kwargs ) -> None:
        '''simple docstring'''
        warnings.warn(
            'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PoolFormerImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
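# Instantiating the deprecated class still works but emits a FutureWarning
# before delegating to the image processor, e.g. (illustrative):
#
#   extractor = PoolFormerFeatureExtractor()  # warns, then behaves like
#                                             # PoolFormerImageProcessor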
| 718 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Optional[Any] =logging.get_logger(__name__)
_A : Optional[int] ={
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """markuplm"""
def __init__( self : int , UpperCamelCase_ : Optional[Any]=3_0522 , UpperCamelCase_ : Optional[Any]=768 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Union[str, Any]=12 , UpperCamelCase_ : Tuple=3072 , UpperCamelCase_ : Union[str, Any]="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Dict=512 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Any=0.02 , UpperCamelCase_ : Optional[Any]=1E-12 , UpperCamelCase_ : List[str]=0 , UpperCamelCase_ : Optional[int]=0 , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : str=256 , UpperCamelCase_ : Optional[Any]=1024 , UpperCamelCase_ : Union[str, Any]=216 , UpperCamelCase_ : int=1001 , UpperCamelCase_ : int=32 , UpperCamelCase_ : int=50 , UpperCamelCase_ : str="absolute" , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Optional[int]=None , **UpperCamelCase_ : Any , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
_lowercase : List[Any] = vocab_size
_lowercase : Union[str, Any] = hidden_size
_lowercase : Dict = num_hidden_layers
_lowercase : Optional[Any] = num_attention_heads
_lowercase : Dict = hidden_act
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[int] = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : List[Any] = type_vocab_size
_lowercase : Union[str, Any] = initializer_range
_lowercase : Optional[int] = layer_norm_eps
_lowercase : Optional[Any] = position_embedding_type
_lowercase : str = use_cache
_lowercase : str = classifier_dropout
# additional properties
_lowercase : int = max_depth
_lowercase : Dict = max_xpath_tag_unit_embeddings
_lowercase : str = max_xpath_subs_unit_embeddings
_lowercase : List[str] = tag_pad_id
_lowercase : Optional[int] = subs_pad_id
_lowercase : Any = xpath_unit_hidden_size
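# Illustrative usage (upstream this class is MarkupLMConfig; the argument
# names below mirror the attributes set in __init__):
#
#   config = MarkupLMConfig(max_depth=50, xpath_unit_hidden_size=32)
#   config.tag_pad_id   # 216 by default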
| 4 | 0 |
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]
def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)
def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)
def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
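# Quick check (illustrative): (0,0,0), (1,1,1) and (2,2,2) lie on one line,
# so the AB x AC cross product rounds to the zero vector:
#
#   are_collinear((0.0, 0.0, 0.0), (1.0, 1.0, 1.0), (2.0, 2.0, 2.0))  # True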
| 719 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def __UpperCamelCase ( _lowercase ) -> Tuple:
_lowercase : Tuple = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_lowercase : Optional[Any] = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
_lowercase : Tuple = 4
_lowercase : Union[str, Any] = 48
_lowercase : Any = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_lowercase : Dict = [6, 6, 6, 6]
_lowercase : Optional[int] = 60
_lowercase : List[str] = [6, 6, 6, 6]
_lowercase : Dict = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_lowercase : str = 4
_lowercase : str = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
_lowercase : str = 1
_lowercase : Tuple = 1
_lowercase : Dict = 126
_lowercase : Optional[int] = 7
_lowercase : List[Any] = 2_5_5.0
_lowercase : Tuple = ''
return config
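# The checkpoint URL doubles as the variant identifier: each branch above
# overrides only the Swin2SR defaults that differ for that release (upscale
# factor, window size, embedding dim, choice of upsampler head, etc.).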
def __UpperCamelCase ( _lowercase, _lowercase ) -> str:
if "patch_embed.proj" in name and "layers" not in name:
_lowercase : Tuple = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
_lowercase : Union[str, Any] = name.replace('patch_embed.norm', 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
_lowercase : Tuple = name.replace('layers', 'encoder.stages' )
if "residual_group.blocks" in name:
_lowercase : str = name.replace('residual_group.blocks', 'layers' )
if "attn.proj" in name:
_lowercase : str = name.replace('attn.proj', 'attention.output.dense' )
if "attn" in name:
_lowercase : List[Any] = name.replace('attn', 'attention.self' )
if "norm1" in name:
_lowercase : List[str] = name.replace('norm1', 'layernorm_before' )
if "norm2" in name:
_lowercase : Tuple = name.replace('norm2', 'layernorm_after' )
if "mlp.fc1" in name:
_lowercase : int = name.replace('mlp.fc1', 'intermediate.dense' )
if "mlp.fc2" in name:
_lowercase : List[str] = name.replace('mlp.fc2', 'output.dense' )
if "q_bias" in name:
_lowercase : Optional[Any] = name.replace('q_bias', 'query.bias' )
if "k_bias" in name:
_lowercase : str = name.replace('k_bias', 'key.bias' )
if "v_bias" in name:
_lowercase : int = name.replace('v_bias', 'value.bias' )
if "cpb_mlp" in name:
_lowercase : Any = name.replace('cpb_mlp', 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
_lowercase : Union[str, Any] = name.replace('patch_embed.proj', 'patch_embed.projection' )
if name == "norm.weight":
_lowercase : Union[str, Any] = 'layernorm.weight'
if name == "norm.bias":
_lowercase : List[Any] = 'layernorm.bias'
if "conv_first" in name:
_lowercase : Tuple = name.replace('conv_first', 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
_lowercase : List[str] = name.replace('conv_last', 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
_lowercase : Union[str, Any] = name.replace('conv_before_upsample.0', 'conv_before_upsample' )
if "upsample.0" in name:
_lowercase : str = name.replace('upsample.0', 'upsample.convolution_0' )
if "upsample.2" in name:
_lowercase : Union[str, Any] = name.replace('upsample.2', 'upsample.convolution_1' )
_lowercase : Optional[int] = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
_lowercase : Optional[Any] = name.replace('upsample.0.weight', 'upsample.conv.weight' )
_lowercase : str = name.replace('upsample.0.bias', 'upsample.conv.bias' )
else:
pass
else:
_lowercase : Tuple = 'swin2sr.' + name
return name
def __UpperCamelCase ( _lowercase, _lowercase ) -> List[str]:
for key in orig_state_dict.copy().keys():
_lowercase : int = orig_state_dict.pop(_lowercase )
if "qkv" in key:
_lowercase : Tuple = key.split('.' )
_lowercase : Optional[Any] = int(key_split[1] )
_lowercase : Any = int(key_split[4] )
_lowercase : Optional[Any] = config.embed_dim
if "weight" in key:
_lowercase : Optional[int] = val[:dim, :]
_lowercase : int = val[dim : dim * 2, :]
_lowercase : int = val[-dim:, :]
else:
_lowercase : Optional[Any] = val[:dim]
_lowercase : Tuple = val[dim : dim * 2]
_lowercase : List[str] = val[-dim:]
pass
else:
_lowercase : List[Any] = val
return orig_state_dict
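# The original checkpoint stores attention as one fused (3 * dim, dim) qkv
# matrix; the slicing above splits it into query/key/value. A self-contained
# illustration of that split (toy sizes, not checkpoint data):
#
#   import torch
#   dim = 4
#   qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
#   q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
#   assert q.shape == k.shape == v.shape == (dim, dim)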
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Union[str, Any]:
_lowercase : Optional[Any] = get_config(_lowercase )
_lowercase : Union[str, Any] = SwinaSRForImageSuperResolution(_lowercase )
model.eval()
_lowercase : List[Any] = torch.hub.load_state_dict_from_url(_lowercase, map_location='cpu' )
_lowercase : Any = convert_state_dict(_lowercase, _lowercase )
    missing_keys , unexpected_keys = model.load_state_dict(_lowercase, strict=False )
    if len(missing_keys ) > 0:
        raise ValueError('Missing keys when converting: {}'.format(missing_keys ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f'''Unexpected key {key} in state_dict''' )
# verify values
_lowercase : str = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
_lowercase : Any = Image.open(requests.get(_lowercase, stream=_lowercase ).raw ).convert('RGB' )
_lowercase : Tuple = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
_lowercase : Tuple = 126 if 'Jpeg' in checkpoint_url else 256
_lowercase : List[str] = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6], std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
_lowercase : Optional[Any] = transforms(_lowercase ).unsqueeze(0 )
if config.num_channels == 1:
_lowercase : Any = pixel_values[:, 0, :, :].unsqueeze(1 )
_lowercase : Optional[int] = model(_lowercase )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
_lowercase : Any = torch.Size([1, 3, 512, 512] )
_lowercase : Tuple = torch.tensor(
[[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_lowercase : Optional[Any] = torch.Size([1, 3, 1024, 1024] )
_lowercase : int = torch.tensor(
[[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
_lowercase : Optional[int] = torch.Size([1, 3, 1024, 1024] )
_lowercase : Dict = torch.tensor(
[[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_lowercase : List[str] = torch.Size([1, 3, 512, 512] )
_lowercase : int = torch.tensor(
[[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_lowercase : Any = torch.Size([1, 3, 1024, 1024] )
_lowercase : Union[str, Any] = torch.tensor(
[[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] )
assert (
outputs.reconstruction.shape == expected_shape
), f'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'''
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], _lowercase, atol=1E-3 )
print('Looks ok!' )
_lowercase : List[str] = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
_lowercase : int = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_lowercase )
if push_to_hub:
model.push_to_hub(f'''caidas/{model_name}''' )
processor.push_to_hub(f'''caidas/{model_name}''' )
if __name__ == "__main__":
_A : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
_A : int =parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 4 | 0 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __UpperCamelCase ( ) -> Tuple:
raise RuntimeError('CUDA out of memory.' )
class lowerCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
_lowercase : List[Any] = nn.Linear(3 , 4 )
_lowercase : Dict = nn.BatchNormad(4 )
_lowercase : Optional[Any] = nn.Linear(4 , 5 )
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Optional[int] ) -> List[Any]:
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(_a ) ) )
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : Tuple ) -> int:
'''simple docstring'''
_lowercase : Any = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(UpperCamelCase_ : int ):
nonlocal batch_sizes
batch_sizes.append(_a )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(_a , [128, 64, 32, 16, 8] )
def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
_lowercase : Optional[int] = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] ):
nonlocal batch_sizes
batch_sizes.append(_a )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
_lowercase , _lowercase : Optional[Any] = mock_training_loop_function('hello' )
self.assertListEqual(_a , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, 'hello'] )
def __UpperCAmelCase ( self : Tuple ) -> Any:
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(UpperCamelCase_ : Dict ):
pass
        with self.assertRaises(RuntimeError ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def __UpperCAmelCase ( self : List[str] ) -> int:
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(UpperCamelCase_ : Any ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
        with self.assertRaises(RuntimeError ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[int] ):
if batch_size != 8:
                raise_fake_out_of_memory()
        with self.assertRaises(TypeError ) as cm:
mock_training_loop_function(128 , 'hello' , 'world' )
self.assertIn('Batch size was passed into `f`' , cm.exception.args[0] )
self.assertIn('`f(arg1=\'hello\', arg2=\'world\')' , cm.exception.args[0] )
def __UpperCAmelCase ( self : Any ) -> Dict:
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(UpperCamelCase_ : Tuple ):
raise ValueError('Oops, we had an error!' )
        with self.assertRaises(ValueError ) as cm:
mock_training_loop_function()
self.assertIn('Oops, we had an error!' , cm.exception.args[0] )
@require_cuda
def __UpperCAmelCase ( self : Dict ) -> str:
'''simple docstring'''
_lowercase : Any = torch.cuda.memory_allocated()
_lowercase : Optional[Any] = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , _a )
_lowercase : Optional[Any] = release_memory(_a )
self.assertEqual(torch.cuda.memory_allocated() , _a )
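# For reference, the decorator under test retries its target with a
# geometrically shrinking batch size. A minimal sketch of that control flow
# (simplified; the real implementation also inspects which exception was
# raised before deciding to shrink):
def _sketch_find_executable_batch_size(f, starting_batch_size=128):
    batch_size = starting_batch_size
    while True:
        if batch_size == 0:
            raise RuntimeError('No executable batch size found, reached zero.')
        try:
            return f(batch_size)
        except RuntimeError:
            batch_size //= 2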
| 720 |
'''simple docstring'''
def text_justification(sentence: str, max_width: int) -> list:
    words = sentence.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
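# Example (illustrative): with max_width=16,
#   text_justification('This is an example of text justification.', 16)
# returns
#   ['This    is    an', 'example  of text', 'justification.  ']
# where inner lines are fully justified and the last line is left-aligned.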
if __name__ == "__main__":
from doctest import testmod
testmod()
| 4 | 0 |
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase__ ( A , unittest.TestCase ):
'''simple docstring'''
A_ = CodeGenTokenizer
A_ = CodeGenTokenizerFast
A_ = True
A_ = {"add_prefix_space": True}
A_ = False
def __UpperCAmelCase ( self : List[str] ) -> Any:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowercase : Tuple = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
_lowercase : Optional[int] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
_lowercase : List[str] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_lowercase : Optional[Any] = {'unk_token': '<unk>'}
_lowercase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_lowercase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCamelCase__ ) )
def __UpperCAmelCase ( self : List[Any] , **UpperCamelCase_ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __UpperCAmelCase ( self : str , **UpperCamelCase_ : str ) -> List[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __UpperCAmelCase ( self : Any , UpperCamelCase_ : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Dict = 'lower newer'
_lowercase : Dict = 'lower newer'
return input_text, output_text
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : int = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowercase : Optional[int] = 'lower newer'
_lowercase : Dict = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
_lowercase : List[Any] = tokenizer.tokenize(UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
_lowercase : Optional[Any] = tokens + [tokenizer.unk_token]
_lowercase : List[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )
def __UpperCAmelCase ( self : List[str] ) -> Dict:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_lowercase : Any = self.get_tokenizer()
_lowercase : List[Any] = self.get_rust_tokenizer(add_prefix_space=UpperCamelCase__ )
_lowercase : Any = 'lower newer'
# Testing tokenization
_lowercase : Optional[int] = tokenizer.tokenize(UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
_lowercase : Optional[int] = rust_tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
# Testing conversion to ids without special tokens
_lowercase : List[Any] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
_lowercase : Union[str, Any] = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
# Testing conversion to ids with special tokens
_lowercase : Any = self.get_rust_tokenizer(add_prefix_space=UpperCamelCase__ )
_lowercase : int = tokenizer.encode(UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
_lowercase : Union[str, Any] = rust_tokenizer.encode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
# Testing the unknown token
_lowercase : Union[str, Any] = tokens + [rust_tokenizer.unk_token]
_lowercase : List[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )
def __UpperCAmelCase ( self : int , *UpperCamelCase_ : Dict , **UpperCamelCase_ : Optional[Any] ) -> Dict:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Dict=15 ) -> Optional[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowercase : Any = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
# Simple input
_lowercase : Any = 'This is a simple input'
_lowercase : Union[str, Any] = ['This is a simple input 1', 'This is a simple input 2']
_lowercase : Dict = ('This is a simple input', 'This is a pair')
_lowercase : Union[str, Any] = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , UpperCamelCase__ , max_length=UpperCamelCase__ , padding='max_length' )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding='max_length' )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding='max_length' , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , UpperCamelCase__ , max_length=UpperCamelCase__ , padding='max_length' )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding='max_length' )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding='max_length' , )
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
'''simple docstring'''
_lowercase : Optional[int] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
_lowercase : Tuple = 'This is a simple input'
_lowercase : List[Any] = ['This is a simple input looooooooong', 'This is a simple input']
_lowercase : Optional[Any] = ('This is a simple input', 'This is a pair')
_lowercase : str = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
_lowercase : Optional[int] = tokenizer.pad_token_id
_lowercase : Union[str, Any] = tokenizer(UpperCamelCase__ , padding='max_length' , max_length=30 , return_tensors='np' )
_lowercase : Tuple = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , truncate=UpperCamelCase__ , return_tensors='np' )
_lowercase : Optional[Any] = tokenizer(*UpperCamelCase__ , padding='max_length' , max_length=60 , return_tensors='np' )
_lowercase : Union[str, Any] = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , truncate=UpperCamelCase__ , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def __UpperCAmelCase ( self : str ) -> Tuple:
'''simple docstring'''
_lowercase : Tuple = '$$$'
_lowercase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=UpperCamelCase__ , add_bos_token=UpperCamelCase__ )
_lowercase : List[str] = 'This is a simple input'
_lowercase : int = ['This is a simple input 1', 'This is a simple input 2']
_lowercase : List[Any] = tokenizer.bos_token_id
_lowercase : str = tokenizer(UpperCamelCase__ )
_lowercase : Union[str, Any] = tokenizer(UpperCamelCase__ )
self.assertEqual(out_s.input_ids[0] , UpperCamelCase__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowercase : Optional[int] = tokenizer.decode(out_s.input_ids )
_lowercase : List[str] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , UpperCamelCase__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __UpperCAmelCase ( self : str ) -> Any:
'''simple docstring'''
_lowercase : Optional[int] = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
_lowercase : Any = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
_lowercase : List[Any] = '\nif len_a > len_b: result = a\nelse: result = b'
_lowercase : Union[str, Any] = tokenizer.encode(UpperCamelCase__ )
_lowercase : List[Any] = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n']
_lowercase : Dict = tokenizer.decode(UpperCamelCase__ , truncate_before_pattern=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __UpperCAmelCase ( self : Any ) -> Any:
'''simple docstring'''
pass
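# For orientation: the toy merges written in setUp encode the BPE steps
# '\u0120 l' -> '\u0120l o' -> '\u0120lo w', so with a prefix space 'lower'
# tokenizes as ['\u0120low', 'er'] -- exactly what the full-tokenizer test
# asserts above.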
| 721 |
'''simple docstring'''
import os
from collections.abc import Iterator
def __UpperCamelCase ( _lowercase = "." ) -> Iterator[str]:
for dir_path, dir_names, filenames in os.walk(_lowercase ):
_lowercase : Optional[int] = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(_lowercase )[1] in (".py", ".ipynb"):
yield os.path.join(_lowercase, _lowercase ).lstrip('./' )
def __UpperCamelCase ( _lowercase ) -> List[str]:
return f'''{i * " "}*''' if i else "\n##"
def __UpperCamelCase ( _lowercase, _lowercase ) -> str:
_lowercase : Optional[Any] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(_lowercase ) or old_parts[i] != new_part) and new_part:
print(f'''{md_prefix(_lowercase )} {new_part.replace("_", " " ).title()}''' )
return new_path
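# md_prefix above renders nesting depth: depth 0 becomes a '\n##' heading,
# deeper entries become indented '*' bullets, so the printed output is a
# ready-to-paste markdown directory index.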
def __UpperCamelCase ( _lowercase = "." ) -> None:
_lowercase : Dict = ''
for filepath in sorted(good_file_paths(_lowercase ) ):
_lowercase , _lowercase : Optional[Any] = os.path.split(_lowercase )
if filepath != old_path:
_lowercase : Dict = print_path(_lowercase, _lowercase )
_lowercase : Optional[int] = (filepath.count(os.sep ) + 1) if filepath else 0
_lowercase : Dict = f'''{filepath}/{filename}'''.replace(' ', '%20' )
_lowercase : Optional[int] = os.path.splitext(filename.replace('_', ' ' ).title() )[0]
print(f'''{md_prefix(_lowercase )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md('''.''')
| 4 | 0 |
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
_A : Dict =logging.get_logger(__name__)
_A : Optional[int] ="""Hello world! cécé herlolip"""
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> int:
_lowercase : str = FairseqRobertaModel.from_pretrained(_lowercase )
roberta.eval() # disable dropout
_lowercase : str = roberta.model.encoder.sentence_encoder
_lowercase : List[Any] = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings, hidden_size=roberta.cfg.model.encoder_embed_dim, num_hidden_layers=roberta.cfg.model.encoder_layers, num_attention_heads=roberta.cfg.model.encoder_attention_heads, intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1E-5, )
if classification_head:
_lowercase : Union[str, Any] = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our RoBERTa config:', _lowercase )
_lowercase : Tuple = XLMRobertaXLForSequenceClassification(_lowercase ) if classification_head else XLMRobertaXLForMaskedLM(_lowercase )
model.eval()
# Now let's copy all the weights.
# Embeddings
_lowercase : Optional[Any] = roberta_sent_encoder.embed_tokens.weight
_lowercase : str = roberta_sent_encoder.embed_positions.weight
_lowercase : Optional[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
_lowercase : str = roberta_sent_encoder.layer_norm.weight
_lowercase : Optional[Any] = roberta_sent_encoder.layer_norm.bias
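    # Each fairseq TransformerSentenceEncoderLayer maps onto one HF layer: the
    # loop below copies the attention projections (q/k/v/out), both layer
    # norms, and the two feed-forward projections, asserting shape agreement
    # along the way.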
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
_lowercase : List[Any] = model.roberta.encoder.layer[i]
_lowercase : str = roberta_sent_encoder.layers[i]
_lowercase : int = layer.attention
_lowercase : Optional[Any] = roberta_layer.self_attn_layer_norm.weight
_lowercase : Dict = roberta_layer.self_attn_layer_norm.bias
# self attention
_lowercase : Union[str, Any] = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
_lowercase : Tuple = roberta_layer.self_attn.q_proj.weight
_lowercase : List[Any] = roberta_layer.self_attn.q_proj.bias
_lowercase : str = roberta_layer.self_attn.k_proj.weight
_lowercase : str = roberta_layer.self_attn.k_proj.bias
_lowercase : Optional[int] = roberta_layer.self_attn.v_proj.weight
_lowercase : List[str] = roberta_layer.self_attn.v_proj.bias
# self-attention output
_lowercase : Optional[int] = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
_lowercase : Optional[int] = roberta_layer.self_attn.out_proj.weight
_lowercase : List[Any] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
_lowercase : Any = roberta_layer.final_layer_norm.weight
_lowercase : Optional[Any] = roberta_layer.final_layer_norm.bias
# intermediate
_lowercase : int = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
_lowercase : Tuple = roberta_layer.fca.weight
_lowercase : Dict = roberta_layer.fca.bias
# output
_lowercase : Dict = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
_lowercase : List[str] = roberta_layer.fca.weight
_lowercase : List[Any] = roberta_layer.fca.bias
# end of layer
if classification_head:
_lowercase : Any = roberta.model.classification_heads['mnli'].dense.weight
_lowercase : Optional[int] = roberta.model.classification_heads['mnli'].dense.bias
_lowercase : Optional[int] = roberta.model.classification_heads['mnli'].out_proj.weight
_lowercase : List[Any] = roberta.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
_lowercase : str = roberta.model.encoder.lm_head.dense.weight
_lowercase : Union[str, Any] = roberta.model.encoder.lm_head.dense.bias
_lowercase : Dict = roberta.model.encoder.lm_head.layer_norm.weight
_lowercase : Any = roberta.model.encoder.lm_head.layer_norm.bias
_lowercase : Optional[int] = roberta.model.encoder.lm_head.weight
_lowercase : Tuple = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
_lowercase : Tuple = roberta.encode(_lowercase ).unsqueeze(0 ) # batch of size 1
_lowercase : Optional[int] = model(_lowercase )[0]
if classification_head:
_lowercase : Optional[int] = roberta.model.classification_heads['mnli'](roberta.extract_features(_lowercase ) )
else:
_lowercase : Dict = roberta.model(_lowercase )[0]
print(our_output.shape, their_output.shape )
_lowercase : Optional[int] = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
_lowercase : Optional[int] = torch.allclose(_lowercase, _lowercase, atol=1E-3 )
print('Do both models output the same tensors?', '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
pathlib.Path(_lowercase ).mkdir(parents=_lowercase, exist_ok=_lowercase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowercase )
if __name__ == "__main__":
_A : List[str] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
_A : int =parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 700 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_A : Union[str, Any] ={'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Dict =['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Union[str, Any] =['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str =[
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
_A : Union[str, Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 | 0 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowerCamelCase__ ( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Any = None , UpperCamelCase_ : List[str] = True , UpperCamelCase_ : Tuple = None , UpperCamelCase_ : List[Any] = False , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : List[str] = True , UpperCamelCase_ : List[str] = "arrow" , **UpperCamelCase_ : Any , ) -> List[Any]:
'''simple docstring'''
super().__init__(
split=lowercase__ , features=lowercase__ , cache_dir=lowercase__ , keep_in_memory=lowercase__ , streaming=lowercase__ , **lowercase__ , )
_lowercase : Any = load_from_cache_file
_lowercase : Optional[int] = file_format
_lowercase : List[Any] = Spark(
df=lowercase__ , features=lowercase__ , cache_dir=lowercase__ , working_dir=lowercase__ , **lowercase__ , )
def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
'''simple docstring'''
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
_lowercase : Optional[int] = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=lowercase__ , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
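# Illustrative end-to-end usage of the reader defined above (upstream it is
# exposed as SparkDatasetReader; assumes an active SparkSession `spark`):
#
#   df = spark.createDataFrame([('a', 1)], ['text', 'label'])
#   ds = SparkDatasetReader(df, cache_dir='/tmp/cache').read()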
| 701 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str]=13 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Any=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : int=99 , UpperCamelCase_ : Tuple=32 , UpperCamelCase_ : List[str]=5 , UpperCamelCase_ : Dict=4 , UpperCamelCase_ : Tuple=37 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : Union[str, Any]=4 , ) -> Tuple:
'''simple docstring'''
_lowercase : int = parent
_lowercase : str = batch_size
_lowercase : List[str] = seq_length
_lowercase : Dict = is_training
_lowercase : Optional[int] = use_attention_mask
_lowercase : List[Any] = use_token_type_ids
_lowercase : Union[str, Any] = use_labels
_lowercase : Dict = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Any = num_hidden_layers
_lowercase : int = num_attention_heads
_lowercase : Optional[int] = intermediate_size
_lowercase : Any = hidden_act
_lowercase : List[str] = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : Optional[int] = max_position_embeddings
_lowercase : int = type_vocab_size
_lowercase : Any = type_sequence_label_size
_lowercase : Any = initializer_range
_lowercase : str = num_choices
def __UpperCAmelCase ( self : str ) -> int:
'''simple docstring'''
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : int = None
if self.use_attention_mask:
_lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : Any = None
if self.use_token_type_ids:
_lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : str = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
_lowercase : Dict = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = config_and_inputs
_lowercase : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class lowerCamelCase__ ( A , unittest.TestCase ):
'''simple docstring'''
A_ = True
A_ = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __UpperCAmelCase ( self : str ) -> int:
'''simple docstring'''
_lowercase : Tuple = FlaxRoFormerModelTester(self )
@slow
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
            _lowercase : Optional[int] = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=True )
_lowercase : str = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase_ )
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
'''simple docstring'''
_lowercase : Dict = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
_lowercase : Any = jnp.array([[0, 1, 2, 3, 4, 5]] )
_lowercase : int = model(UpperCamelCase_ )[0]
_lowercase : Union[str, Any] = 5_0000
_lowercase : str = (1, 6, vocab_size)
self.assertEqual(output.shape , UpperCamelCase_ )
_lowercase : int = jnp.array(
[[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
| 4 | 0 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_A : Optional[Any] =logging.get_logger(__name__)
@add_end_docstrings(A )
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : Dict , **UpperCamelCase_ : Dict ) -> int:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : List[Any] , UpperCamelCase_ : Union[str, List[str], "Image", List["Image"]] , **UpperCamelCase_ : Union[str, Any] ) -> int:
'''simple docstring'''
return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] , **UpperCamelCase_ : str ) -> List[str]:
'''simple docstring'''
_lowercase : int = {}
if "candidate_labels" in kwargs:
_lowercase : Optional[Any] = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
_lowercase : Optional[int] = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : List[Any]="This is a photo of {}." ) -> Optional[Any]:
'''simple docstring'''
_lowercase : str = load_image(UpperCamelCase_ )
_lowercase : Union[str, Any] = self.image_processor(images=[image] , return_tensors=self.framework )
_lowercase : Union[str, Any] = candidate_labels
_lowercase : List[str] = [hypothesis_template.format(UpperCamelCase_ ) for x in candidate_labels]
_lowercase : Optional[int] = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework , padding=UpperCamelCase_ )
_lowercase : List[str] = [text_inputs]
return inputs
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : Dict ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Union[str, Any] = model_inputs.pop('candidate_labels' )
_lowercase : Any = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , UpperCamelCase_ ):
_lowercase : Tuple = text_inputs[0]
else:
# Batching case.
_lowercase : Union[str, Any] = text_inputs[0][0]
_lowercase : List[str] = self.model(**UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Dict = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Union[str, Any] = model_outputs.pop('candidate_labels' )
_lowercase : List[str] = model_outputs['logits'][0]
if self.framework == "pt":
_lowercase : str = logits.softmax(dim=-1 ).squeeze(-1 )
_lowercase : Optional[Any] = probs.tolist()
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_lowercase : Union[str, Any] = [scores]
elif self.framework == "tf":
_lowercase : Tuple = stable_softmax(UpperCamelCase_ , axis=-1 )
_lowercase : Any = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
_lowercase : Optional[Any] = [
{'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(UpperCamelCase_ , UpperCamelCase_ ) , key=lambda x : -x[0] )
]
return result
| 702 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_A : Optional[int] =logging.get_logger(__name__)
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = ["""input_features""", """is_longer"""]
def __init__( self : List[Any] , UpperCamelCase_ : List[Any]=64 , UpperCamelCase_ : int=4_8000 , UpperCamelCase_ : Union[str, Any]=480 , UpperCamelCase_ : Any=10 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[int]=0.0 , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 1_4000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ) -> Dict:
'''simple docstring'''
super().__init__(
feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , )
_lowercase : Tuple = top_db
_lowercase : Any = truncation
_lowercase : str = padding
_lowercase : int = fft_window_size
_lowercase : Any = (fft_window_size >> 1) + 1
_lowercase : int = hop_length
_lowercase : Any = max_length_s
_lowercase : str = max_length_s * sampling_rate
_lowercase : Any = sampling_rate
_lowercase : List[Any] = frequency_min
_lowercase : Tuple = frequency_max
_lowercase : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale='htk' , )
_lowercase : Any = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm='slaney' , mel_scale='slaney' , )
def __UpperCAmelCase ( self : Tuple ) -> Dict[str, Any]:
'''simple docstring'''
_lowercase : Tuple = copy.deepcopy(self.__dict__ )
_lowercase : int = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None ) -> np.ndarray:
'''simple docstring'''
_lowercase : List[str] = spectrogram(
UpperCamelCase_ , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel='dB' , )
return log_mel_spectrogram.T
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_lowercase : Tuple = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
_lowercase : int = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
_lowercase : Union[str, Any] = [0]
# randomly choose index for each part
_lowercase : Tuple = np.random.choice(ranges[0] )
_lowercase : int = np.random.choice(ranges[1] )
_lowercase : Any = np.random.choice(ranges[2] )
_lowercase : int = mel[idx_front : idx_front + chunk_frames, :]
_lowercase : int = mel[idx_middle : idx_middle + chunk_frames, :]
_lowercase : Tuple = mel[idx_back : idx_back + chunk_frames, :]
_lowercase : List[Any] = torch.tensor(mel[None, None, :] )
_lowercase : Optional[int] = torch.nn.functional.interpolate(
UpperCamelCase_ , size=[chunk_frames, 64] , mode='bilinear' , align_corners=UpperCamelCase_ )
_lowercase : str = mel_shrink[0][0].numpy()
_lowercase : int = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : np.array , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_lowercase : Tuple = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_lowercase : Any = len(UpperCamelCase_ ) - max_length
_lowercase : Dict = np.random.randint(0 , overflow + 1 )
_lowercase : Optional[int] = waveform[idx : idx + max_length]
_lowercase : Dict = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
_lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters )
_lowercase : List[Any] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
_lowercase : Optional[int] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_lowercase : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
_lowercase : List[Any] = False
else:
_lowercase : Union[str, Any] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
_lowercase : int = True
else:
raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
else:
_lowercase : Any = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
_lowercase : List[Any] = int(max_length / len(UpperCamelCase_ ) )
_lowercase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
_lowercase : Union[str, Any] = int(max_length / len(UpperCamelCase_ ) )
_lowercase : Union[str, Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_ ) )
_lowercase : Dict = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
_lowercase : str = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters )
_lowercase : Dict = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
_lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Dict , ) -> BatchFeature:
'''simple docstring'''
_lowercase : Dict = truncation if truncation is not None else self.truncation
_lowercase : int = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
_lowercase : Optional[Any] = isinstance(UpperCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_lowercase : List[str] = is_batched_numpy or (
isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_lowercase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray ):
_lowercase : Any = np.asarray(UpperCamelCase_ , dtype=np.floataa )
elif isinstance(UpperCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowercase : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowercase : int = [np.asarray(UpperCamelCase_ )]
# convert to mel spectrogram, truncate and pad if needed.
_lowercase : Optional[Any] = [
self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_ )
for waveform in raw_speech
]
_lowercase : List[Any] = []
_lowercase : Dict = []
for mel, longer in padded_inputs:
input_mel.append(UpperCamelCase_ )
is_longer.append(UpperCamelCase_ )
if truncation == "fusion" and sum(UpperCamelCase_ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_lowercase : Optional[Any] = np.random.randint(0 , len(UpperCamelCase_ ) )
_lowercase : str = True
if isinstance(input_mel[0] , UpperCamelCase_ ):
_lowercase : str = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
_lowercase : Tuple = [[longer] for longer in is_longer]
_lowercase : Optional[Any] = {'input_features': input_mel, 'is_longer': is_longer}
_lowercase : Optional[int] = BatchFeature(UpperCamelCase_ )
if return_tensors is not None:
_lowercase : List[Any] = input_features.convert_to_tensors(UpperCamelCase_ )
return input_features
| 4 | 0 |
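A minimal NumPy sketch of the "repeatpad" strategy used by the feature extractor above: the waveform is tiled until it covers most of the target length and the remainder is zero-padded. The sample values and the target length of 10 are made up for illustration.

import numpy as np

wav = np.arange(4, dtype=np.float32)                       # a 4-sample toy waveform
max_length = 10
n_repeat = max_length // len(wav)                          # how many full copies fit
tiled = np.tile(wav, n_repeat)                             # repeat the audio first
padded = np.pad(tiled, (0, max_length - tiled.shape[0]), mode="constant")
print(padded)  # [0. 1. 2. 3. 0. 1. 2. 3. 0. 0.]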
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
_A : List[Any] =[int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)]
def __UpperCamelCase ( ) -> Union[str, Any]:
_lowercase : str = os.path.dirname(os.path.realpath(__lowercase ) )
_lowercase : Tuple = os.path.join(__lowercase, 'words.txt' )
_lowercase : str = ''
with open(__lowercase ) as f:
_lowercase : Optional[Any] = f.readline()
_lowercase : Tuple = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
_lowercase : Optional[Any] = [
word
        for word in [sum(ord(x ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(__lowercase )
if __name__ == "__main__":
print(solution())
| 703 |
'''simple docstring'''
from __future__ import annotations
import requests
def __UpperCamelCase ( _lowercase ) -> dict:
_lowercase : Optional[int] = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
return requests.get(_lowercase ).json()
def __UpperCamelCase ( _lowercase = 10 ) -> list[dict]:
_lowercase : Union[str, Any] = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
_lowercase : Optional[Any] = requests.get(_lowercase ).json()[:max_stories]
return [get_hackernews_story(_lowercase ) for story_id in story_ids]
def __UpperCamelCase ( _lowercase = 10 ) -> str:
_lowercase : Tuple = hackernews_top_stories(_lowercase )
return "\n".join('* [{title}]({url})'.format(**_lowercase ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 4 | 0 |
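The Project Euler snippet above precomputes the first 100 triangular numbers; membership can also be tested in O(1) via the closed form t = n(n + 1) / 2, which holds exactly when 8t + 1 is a perfect square. A short self-contained check:

import math

def is_triangular(t: int) -> bool:
    root = math.isqrt(8 * t + 1)
    return root * root == 8 * t + 1

word_value = sum(ord(c) - 64 for c in "SKY")  # 19 + 11 + 25 = 55
print(word_value, is_triangular(word_value))  # 55 True (55 is the 10th triangular number)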
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_A : Optional[Any] =2
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self : Optional[Any] , *, # begin keyword-only arguments
UpperCamelCase_ : Optional[Any]="<s>" , UpperCamelCase_ : Union[str, Any]="<pad>" , UpperCamelCase_ : Dict="</s>" , UpperCamelCase_ : Optional[Any]="<unk>" , UpperCamelCase_ : int=None , ) -> List[str]:
'''simple docstring'''
_lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = bos, unk, pad, eos
_lowercase : List[Any] = []
_lowercase : Dict = []
_lowercase : List[str] = {}
_lowercase : Union[str, Any] = self.add_symbol(_A )
_lowercase : Tuple = self.add_symbol(_A )
_lowercase : Optional[Any] = self.add_symbol(_A )
_lowercase : Tuple = self.add_symbol(_A )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(_A )
_lowercase : Optional[Any] = len(self.symbols )
def __eq__( self : Optional[Any] , UpperCamelCase_ : int ) -> Union[str, Any]:
'''simple docstring'''
return self.indices == other.indices
def __getitem__( self : Tuple , UpperCamelCase_ : Tuple ) -> Tuple:
'''simple docstring'''
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : List[Any] ) -> List[Any]:
'''simple docstring'''
return len(self.symbols )
def __contains__( self : Union[str, Any] , UpperCamelCase_ : Optional[Any] ) -> Tuple:
'''simple docstring'''
return sym in self.indices
@classmethod
def __UpperCAmelCase ( cls : List[str] , UpperCamelCase_ : Any ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : List[Any] = cls()
d.add_from_file(_A )
return d
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, Any]=1 , UpperCamelCase_ : Union[str, Any]=False ) -> Union[str, Any]:
'''simple docstring'''
if word in self.indices and not overwrite:
_lowercase : str = self.indices[word]
_lowercase : Union[str, Any] = self.count[idx] + n
return idx
else:
_lowercase : int = len(self.symbols )
_lowercase : Any = idx
self.symbols.append(_A )
self.count.append(_A )
return idx
def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return 0
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Dict ) -> List[str]:
'''simple docstring'''
if isinstance(_A , _A ):
try:
with open(_A , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(_A )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(_A ) )
return
_lowercase : Optional[int] = f.readlines()
_lowercase : Optional[Any] = self._load_meta(_A )
for line in lines[indices_start_line:]:
try:
_lowercase , _lowercase : Dict = line.rstrip().rsplit(' ' , 1 )
if field == "#fairseq:overwrite":
_lowercase : Optional[Any] = True
_lowercase , _lowercase : Dict = line.rsplit(' ' , 1 )
else:
_lowercase : Dict = False
_lowercase : str = int(_A )
_lowercase : Optional[int] = line
if word in self and not overwrite:
raise RuntimeError(
'Duplicate word found when loading Dictionary: \'{}\'. '
'Duplicate words can overwrite earlier ones by adding the '
'#fairseq:overwrite flag at the end of the corresponding row '
'in the dictionary file. If using the Camembert model, please '
'download an updated copy of the model file.'.format(_A ) )
self.add_symbol(_A , n=_A , overwrite=_A )
except ValueError:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def __UpperCamelCase ( _lowercase ) -> int:
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    _lowercase : Tuple = dict((re.sub(r'@@$', '', k ), v) if k.endswith('@@' ) else (re.sub(r'$', '</w>', k ), v) for k, v in d.items() )
_lowercase : Dict = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[f'''{k}</w>''']
_lowercase : Dict = d[k] # restore
return da
def __UpperCamelCase ( _lowercase, _lowercase ) -> Any:
# prep
if not os.path.exists(__snake_case ):
raise ValueError(f'''path {biogpt_checkpoint_path} does not exist!''' )
os.makedirs(__snake_case, exist_ok=__snake_case )
print(f'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
_lowercase : Any = os.path.join(__snake_case, 'checkpoint.pt' )
if not os.path.isfile(__snake_case ):
raise ValueError(f'''path to the file {checkpoint_file} does not exist!''' )
_lowercase : Dict = torch.load(__snake_case, map_location='cpu' )
_lowercase : Dict = chkpt['cfg']['model']
# dicts
_lowercase : Tuple = os.path.join(__snake_case, 'dict.txt' )
if not os.path.isfile(__snake_case ):
raise ValueError(f'''path to the file {dict_file} does not exist!''' )
_lowercase : int = Dictionary.load(__snake_case )
_lowercase : int = rewrite_dict_keys(src_dict.indices )
_lowercase : str = len(__snake_case )
_lowercase : Optional[int] = os.path.join(__snake_case, VOCAB_FILES_NAMES['vocab_file'] )
print(f'''Generating {src_vocab_file} of {src_vocab_size} records''' )
with open(__snake_case, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(__snake_case, ensure_ascii=__snake_case, indent=__snake_case ) )
# merges_file (bpecodes)
_lowercase : Any = os.path.join(__snake_case, 'bpecodes' )
if not os.path.isfile(__snake_case ):
raise ValueError(f'''path to the file {bpecodes_file} does not exist!''' )
_lowercase : List[Any] = os.path.join(__snake_case, VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(__snake_case, __snake_case )
# model config
_lowercase : List[Any] = os.path.join(__snake_case, 'config.json' )
_lowercase : Optional[int] = {
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.0_2,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1E-1_2,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(f'''Generating {biogpt_model_config_file}''' )
with open(__snake_case, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(__snake_case, ensure_ascii=__snake_case, indent=__snake_case ) )
# tokenizer config
_lowercase : List[Any] = os.path.join(__snake_case, __snake_case )
_lowercase : Optional[int] = {
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 1024,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(f'''Generating {biogpt_tokenizer_config_file}''' )
with open(__snake_case, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(__snake_case, ensure_ascii=__snake_case, indent=__snake_case ) )
# model
_lowercase : int = chkpt['model']
# remove unneeded keys
_lowercase : List[str] = [
'decoder.version',
]
for k in ignore_keys:
model_state_dict.pop(__snake_case, __snake_case )
_lowercase : Optional[int] = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
_lowercase : Dict = model_state_dict.pop(__snake_case )
else:
_lowercase : str = model_state_dict.pop(__snake_case )
_lowercase : Optional[Any] = BioGptConfig.from_pretrained(__snake_case )
_lowercase : List[Any] = BioGptForCausalLM(__snake_case )
# check that it loads ok
model_new.load_state_dict(__snake_case )
# save
_lowercase : Any = os.path.join(__snake_case, __snake_case )
print(f'''Generating {pytorch_weights_dump_path}''' )
torch.save(__snake_case, __snake_case )
print('Conversion is done!' )
if __name__ == "__main__":
_A : List[str] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_A : Tuple =parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 704 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Dict =logging.get_logger(__name__)
_A : Dict ={
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """megatron-bert"""
def __init__( self : int , UpperCamelCase_ : int=2_9056 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[Any]=24 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=4096 , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : int=512 , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : Any=1E-12 , UpperCamelCase_ : Tuple=0 , UpperCamelCase_ : Optional[int]="absolute" , UpperCamelCase_ : Optional[Any]=True , **UpperCamelCase_ : Any , ) -> List[Any]:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Dict = vocab_size
_lowercase : Any = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Dict = num_attention_heads
_lowercase : Dict = hidden_act
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[int] = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : str = type_vocab_size
_lowercase : Optional[Any] = initializer_range
_lowercase : List[str] = layer_norm_eps
_lowercase : List[Any] = position_embedding_type
_lowercase : Optional[Any] = use_cache
| 4 | 0 |
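The BioGPT conversion above rewrites fairseq BPE vocabulary keys: the "@@" continuation marker is stripped and word-final tokens get a "</w>" suffix. A self-contained demo of that transform on a toy vocabulary (the function name is chosen for readability):

import re

def rewrite_bpe_keys(d: dict) -> dict:
    return dict(
        (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v)
        for k, v in d.items()
    )

print(rewrite_bpe_keys({"le@@": 5, "tt@@": 6, "er": 7}))
# {'le': 5, 'tt': 6, 'er</w>': 7}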
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A : str ={'''configuration_timm_backbone''': ['''TimmBackboneConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : List[Any] =['''TimmBackbone''']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
_A : Any =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 705 |
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def __UpperCamelCase ( _lowercase ) -> List[Any]:
_lowercase : Tuple = args.pruning_method
_lowercase : int = args.threshold
_lowercase : str = args.model_name_or_path.rstrip('/' )
_lowercase : Dict = args.target_model_path
print(f'''Load fine-pruned model from {model_name_or_path}''' )
_lowercase : str = torch.load(os.path.join(_lowercase, 'pytorch_model.bin' ) )
_lowercase : List[Any] = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
_lowercase : Optional[int] = tensor
print(f'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
_lowercase : List[str] = tensor
print(f'''Copied layer {name}''' )
elif "bias" in name:
_lowercase : Dict = tensor
print(f'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
_lowercase : Union[str, Any] = MagnitudeBinarizer.apply(inputs=_lowercase, threshold=_lowercase )
_lowercase : Optional[Any] = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
_lowercase : Optional[Any] = name[:-6]
_lowercase : Optional[Any] = model[f'''{prefix_}mask_scores''']
_lowercase : List[str] = TopKBinarizer.apply(_lowercase, _lowercase )
_lowercase : str = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
_lowercase : str = name[:-6]
_lowercase : Optional[Any] = model[f'''{prefix_}mask_scores''']
_lowercase : str = ThresholdBinarizer.apply(_lowercase, _lowercase, _lowercase )
_lowercase : Optional[int] = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
_lowercase : Optional[int] = name[:-6]
_lowercase : List[str] = model[f'''{prefix_}mask_scores''']
_lowercase , _lowercase : Union[str, Any] = -0.1, 1.1
_lowercase : str = torch.sigmoid(_lowercase )
_lowercase : int = s * (r - l) + l
_lowercase : Optional[Any] = s_bar.clamp(min=0.0, max=1.0 )
_lowercase : Union[str, Any] = tensor * mask
print(f'''Pruned layer {name}''' )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
_lowercase : List[Any] = os.path.join(
os.path.dirname(_lowercase ), f'''bertarized_{os.path.basename(_lowercase )}''' )
if not os.path.isdir(_lowercase ):
shutil.copytree(_lowercase, _lowercase )
print(f'''\nCreated folder {target_model_path}''' )
torch.save(_lowercase, os.path.join(_lowercase, 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
_A : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
_A : List[Any] =parser.parse_args()
main(args)
| 4 | 0 |
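The pruning script above multiplies each weight tensor by a binary mask produced by the chosen binarizer. For the "magnitude" method the idea reduces to keeping weights whose absolute value exceeds a threshold; a minimal sketch (the weights and threshold are made up):

import torch

def magnitude_prune(tensor: torch.Tensor, threshold: float) -> torch.Tensor:
    mask = (tensor.abs() > threshold).to(tensor.dtype)  # 1.0 where the weight survives
    return tensor * mask

w = torch.tensor([0.05, -0.8, 0.3, -0.02])
print(magnitude_prune(w, threshold=0.1))  # only -0.8 and 0.3 survive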
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class lowerCamelCase__ ( __lowerCamelCase ):
'''simple docstring'''
A_ = (
'''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
'''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
'''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
)
A_ = '''CIDAS/clipseg-rd64-refined'''
A_ = '''image_segmenter'''
A_ = CLIPSegForImageSegmentation
A_ = ['''image''', '''text''']
A_ = ['''image''']
def __init__( self : List[Any] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : Dict ) -> Any:
'''simple docstring'''
requires_backends(self , ['vision'] )
super().__init__(*a_ , **a_ )
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[int] ) -> str:
'''simple docstring'''
return self.pre_processor(text=[label] , images=[image] , padding=a_ , return_tensors='pt' )
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : int ) -> Optional[int]:
'''simple docstring'''
with torch.no_grad():
_lowercase : int = self.model(**a_ ).logits
return logits
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : str ) -> List[str]:
'''simple docstring'''
_lowercase : str = outputs.cpu().detach().numpy()
_lowercase : Union[str, Any] = 0
_lowercase : Dict = 1
return Image.fromarray((array * 255).astype(np.uinta ) )
| 706 |
'''simple docstring'''
_A : Optional[Any] ='''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def __UpperCamelCase ( _lowercase ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(_lowercase, _lowercase ):
_lowercase : Union[str, Any] = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(_lowercase )
_lowercase : int = ''.join(bin(_lowercase )[2:].zfill(8 ) for byte in data )
_lowercase : Dict = len(_lowercase ) % 6 != 0
if padding_needed:
# The padding that will be added later
_lowercase : Optional[Any] = B'=' * ((6 - len(_lowercase ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(_lowercase ) % 6)
else:
_lowercase : Optional[int] = B''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6], 2 )]
for index in range(0, len(_lowercase ), 6 ) ).encode()
+ padding
)
def __UpperCamelCase ( _lowercase ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(_lowercase, _lowercase ) and not isinstance(_lowercase, _lowercase ):
_lowercase : int = (
'argument should be a bytes-like object or ASCII string, '
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(_lowercase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_lowercase, _lowercase ):
try:
_lowercase : Optional[int] = encoded_data.decode('utf-8' )
except UnicodeDecodeError:
raise ValueError('base64 encoded data should only contain ASCII characters' )
_lowercase : Optional[int] = encoded_data.count('=' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_lowercase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
_lowercase : str = encoded_data[:-padding]
_lowercase : Tuple = ''.join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
_lowercase : Union[str, Any] = ''.join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )
_lowercase : List[str] = [
int(binary_stream[index : index + 8], 2 )
for index in range(0, len(_lowercase ), 8 )
]
return bytes(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 | 0 |
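A quick sanity check for the hand-rolled Base64 codec above is to compare it with the standard library on known vectors; the classic RFC 4648 example encodes b"Man" as b"TWFu":

import base64

assert base64.b64encode(b"Man") == b"TWFu"
assert base64.b64decode(b"TWFu") == b"Man"
# For any bytes `data`, the encoder above (call it base64_encode for readability)
# should agree byte-for-byte: base64_encode(data) == base64.b64encode(data)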
'''simple docstring'''
_A : List[str] ='''Input must be a string of 8 numbers plus letter'''
_A : str ='''TRWAGMYFPDXBNJZSQVHLCKE'''
def __UpperCamelCase ( _lowercase ) -> int:
if not isinstance(_snake_case, _snake_case ):
_lowercase : List[Any] = f'''Expected string as input, found {type(_snake_case ).__name__}'''
raise TypeError(_snake_case )
_lowercase : str = spanish_id.replace('-', '' ).upper()
if len(_snake_case ) != 9:
raise ValueError(_snake_case )
try:
_lowercase : Optional[Any] = int(spanish_id_clean[0:8] )
_lowercase : Any = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(_snake_case ) from ex
if letter.isdigit():
raise ValueError(_snake_case )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707 |
'''simple docstring'''
def __UpperCamelCase ( _lowercase ) -> bool:
return str(_lowercase ) == str(_lowercase )[::-1]
def __UpperCamelCase ( _lowercase ) -> int:
return int(_lowercase ) + int(str(_lowercase )[::-1] )
def __UpperCamelCase ( _lowercase = 1_0000 ) -> int:
_lowercase : List[str] = []
for num in range(1, _lowercase ):
_lowercase : Tuple = 0
_lowercase : Tuple = num
while iterations < 50:
_lowercase : Union[str, Any] = sum_reverse(_lowercase )
iterations += 1
if is_palindrome(_lowercase ):
break
else:
lychrel_nums.append(_lowercase )
return len(_lowercase )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 4 | 0 |
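The Lychrel search above repeatedly applies reverse-and-add; most numbers reach a palindrome within a few steps. For example, 47 does so after one iteration, while 196 is the best-known candidate that appears never to:

def sum_reverse(n: int) -> int:
    return n + int(str(n)[::-1])

print(sum_reverse(47))  # 121 -> already a palindrome after one step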
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_A : List[str] ={
'configuration_gpt_neo': ['GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoConfig', 'GPTNeoOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Tuple =[
'GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoForCausalLM',
'GPTNeoForQuestionAnswering',
'GPTNeoForSequenceClassification',
'GPTNeoForTokenClassification',
'GPTNeoModel',
'GPTNeoPreTrainedModel',
'load_tf_weights_in_gpt_neo',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str =[
'FlaxGPTNeoForCausalLM',
'FlaxGPTNeoModel',
'FlaxGPTNeoPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
_A : Optional[Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 708 |
'''simple docstring'''
import argparse
from collections import defaultdict
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) -> int:
_lowercase : Optional[int] = f'''{file}_{class_name}_{test_name}'''
done_test[_id] += 1
with open(_lowercase, 'r' ) as f:
_lowercase : Optional[int] = f.readlines()
_lowercase : Dict = f'''class {class_name}('''
_lowercase : List[Any] = f'''{4 * " "}def {test_name}('''
_lowercase : List[str] = f'''{8 * " "}{correct_line.split()[0]}'''
_lowercase : List[str] = f'''{16 * " "}{correct_line.split()[0]}'''
_lowercase : Dict = False
_lowercase : str = False
_lowercase : List[Any] = False
_lowercase : Union[str, Any] = False
_lowercase : Any = 0
_lowercase : Tuple = 0
_lowercase : Optional[int] = []
for line in lines:
if line.startswith(_lowercase ):
_lowercase : int = True
elif in_class and line.startswith(_lowercase ):
_lowercase : List[Any] = True
elif in_class and in_func and (line.startswith(_lowercase ) or line.startswith(_lowercase )):
_lowercase : str = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_lowercase : List[Any] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_lowercase : Any = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f'''{spaces * " "}{correct_line}''' )
_lowercase : Any = False
else:
new_lines.append(_lowercase )
with open(_lowercase, 'w' ) as f:
for line in new_lines:
f.write(_lowercase )
def __UpperCamelCase ( _lowercase, _lowercase=None ) -> Optional[Any]:
if fail is not None:
with open(_lowercase, 'r' ) as f:
_lowercase : Any = {l.strip() for l in f.readlines()}
else:
_lowercase : str = None
with open(_lowercase, 'r' ) as f:
_lowercase : str = f.readlines()
_lowercase : Union[str, Any] = defaultdict(_lowercase )
for line in correct_lines:
_lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = line.split(';' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(_lowercase, _lowercase, _lowercase, _lowercase, _lowercase )
if __name__ == "__main__":
_A : str =argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
_A : Union[str, Any] =parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 4 | 0 |
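The GPT-Neo __init__ above defers heavy imports through transformers' _LazyModule: submodules load only when an attribute is first accessed. A rough sketch of the same idea using only the standard library:

import importlib

class LazyModule:
    def __init__(self, name: str):
        self._name = name
        self._module = None

    def __getattr__(self, attr):
        if self._module is None:               # actual import deferred to first use
            self._module = importlib.import_module(self._name)
        return getattr(self._module, attr)

json = LazyModule("json")                      # nothing imported yet
print(json.dumps({"lazy": True}))              # the import happens here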
'''simple docstring'''
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def __UpperCamelCase ( *_lowercase ) -> List[Any]:
with open(A_, 'r' ) as fh:
fcntl.flock(A_, fcntl.LOCK_EX )
try:
print(*A_ )
finally:
fcntl.flock(A_, fcntl.LOCK_UN )
_A : Tuple =int(os.environ['''LOCAL_RANK'''])
torch.cuda.set_device(local_rank)
_A : str =torch.device('''cuda''', local_rank)
_A : int =socket.gethostname()
_A : Union[str, Any] =F'''[{hostname}-{local_rank}]'''
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
_A : Optional[Any] =dist.get_rank()
_A : Union[str, Any] =dist.get_world_size()
printflock(F'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(F'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(F'''{gpu} is broken''')
raise
| 709 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_A : Optional[int] =logging.get_logger(__name__)
@add_end_docstrings(A )
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : Tuple , **UpperCamelCase_ : List[str] ) -> int:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : int , UpperCamelCase_ : Union[str, List[str], "Image", List["Image"]] , **UpperCamelCase_ : Tuple ) -> List[Any]:
'''simple docstring'''
return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] , **UpperCamelCase_ : str ) -> List[str]:
'''simple docstring'''
_lowercase : Optional[int] = {}
if "candidate_labels" in kwargs:
_lowercase : Union[str, Any] = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
_lowercase : int = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str="This is a photo of {}." ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Dict = load_image(UpperCamelCase_ )
_lowercase : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
_lowercase : Optional[Any] = candidate_labels
_lowercase : List[Any] = [hypothesis_template.format(UpperCamelCase_ ) for x in candidate_labels]
_lowercase : Union[str, Any] = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework , padding=UpperCamelCase_ )
_lowercase : Any = [text_inputs]
return inputs
def __UpperCAmelCase ( self : str , UpperCamelCase_ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = model_inputs.pop('candidate_labels' )
_lowercase : List[str] = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , UpperCamelCase_ ):
_lowercase : Optional[int] = text_inputs[0]
else:
# Batching case.
_lowercase : List[str] = text_inputs[0][0]
_lowercase : Optional[Any] = self.model(**UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Optional[Any] = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> List[str]:
'''simple docstring'''
_lowercase : Optional[int] = model_outputs.pop('candidate_labels' )
_lowercase : Optional[int] = model_outputs['logits'][0]
if self.framework == "pt":
_lowercase : List[Any] = logits.softmax(dim=-1 ).squeeze(-1 )
_lowercase : Tuple = probs.tolist()
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_lowercase : List[Any] = [scores]
elif self.framework == "tf":
_lowercase : Optional[int] = stable_softmax(UpperCamelCase_ , axis=-1 )
_lowercase : List[Any] = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
_lowercase : List[Any] = [
{'score': score, 'label': candidate_label}
for score, candidate_label in sorted(zip(UpperCamelCase_ , UpperCamelCase_ ) , key=lambda UpperCamelCase_ : -x[0] )
]
return result
| 4 | 0 |
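The zero-shot classification postprocessing above is, at its core, a softmax over per-label logits followed by a sort. A framework-free NumPy version with made-up logits and labels:

import numpy as np

def zero_shot_scores(logits: np.ndarray, candidate_labels: list) -> list:
    shifted = logits - logits.max()                       # numerically stable softmax
    probs = np.exp(shifted) / np.exp(shifted).sum()
    return sorted(
        ({"score": float(p), "label": lbl} for p, lbl in zip(probs, candidate_labels)),
        key=lambda item: -item["score"],
    )

print(zero_shot_scores(np.array([2.0, 0.5, -1.0]), ["cat", "dog", "car"]))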
'''simple docstring'''
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase ) -> Dict:
_lowercase : Tuple = multiprocessing.Manager()
_lowercase : int = manager.list()
_lowercase : Dict = multiprocessing.Process(target=_lowercase, args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('timed out' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Dict:
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
_lowercase : Any = shutil.rmtree
_lowercase : Tuple = os.rmdir
_lowercase : Tuple = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
_lowercase : Tuple = {}
with swallow_io():
with time_limit(_lowercase ):
exec(_lowercase, _lowercase )
result.append('passed' )
except TimeoutException:
result.append('timed out' )
except BaseException as e:
result.append(f'''failed: {e}''' )
# Needed for cleaning up.
_lowercase : Dict = rmtree
_lowercase : str = rmdir
_lowercase : Optional[int] = chdir
@contextlib.contextmanager
def __UpperCamelCase ( _lowercase ) -> Union[str, Any]:
def signal_handler(_lowercase, _lowercase ):
raise TimeoutException('Timed out!' )
signal.setitimer(signal.ITIMER_REAL, _lowercase )
signal.signal(signal.SIGALRM, _lowercase )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL, 0 )
@contextlib.contextmanager
def __UpperCamelCase ( ) -> Any:
_lowercase : Any = WriteOnlyStringIO()
with contextlib.redirect_stdout(_lowercase ):
with contextlib.redirect_stderr(_lowercase ):
with redirect_stdin(_lowercase ):
yield
@contextlib.contextmanager
def __UpperCamelCase ( ) -> str:
with tempfile.TemporaryDirectory() as dirname:
with chdir(_lowercase ):
yield dirname
class lowerCamelCase__ ( snake_case__ ):
'''simple docstring'''
pass
class lowerCamelCase__ ( io.StringIO ):
'''simple docstring'''
def __UpperCAmelCase ( self : Tuple , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : List[Any] ) -> Dict:
'''simple docstring'''
raise OSError
def __UpperCAmelCase ( self : Union[str, Any] , *UpperCamelCase_ : str , **UpperCamelCase_ : Optional[int] ) -> str:
'''simple docstring'''
raise OSError
def __UpperCAmelCase ( self : int , *UpperCamelCase_ : Any , **UpperCamelCase_ : str ) -> List[str]:
'''simple docstring'''
raise OSError
def __UpperCAmelCase ( self : Tuple , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Dict ) -> Union[str, Any]:
'''simple docstring'''
return False
class lowerCamelCase__ ( contextlib._RedirectStream ): # type: ignore
'''simple docstring'''
A_ = '''stdin'''
@contextlib.contextmanager
def __UpperCamelCase ( _lowercase ) -> List[str]:
if root == ".":
yield
return
_lowercase : str = os.getcwd()
os.chdir(_lowercase )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(_lowercase )
def __UpperCamelCase ( _lowercase=None ) -> List[Any]:
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
_lowercase : List[Any] = None
_lowercase : Tuple = None
import os
_lowercase : int = '1'
_lowercase : List[str] = None
_lowercase : Union[str, Any] = None
_lowercase : Dict = None
_lowercase : str = None
_lowercase : str = None
_lowercase : Optional[int] = None
_lowercase : Dict = None
_lowercase : int = None
_lowercase : Tuple = None
_lowercase : Optional[Any] = None
_lowercase : Optional[int] = None
_lowercase : int = None
_lowercase : str = None
_lowercase : Tuple = None
_lowercase : List[Any] = None
_lowercase : Union[str, Any] = None
_lowercase : List[str] = None
_lowercase : Optional[Any] = None
_lowercase : Union[str, Any] = None
_lowercase : Union[str, Any] = None
_lowercase : Any = None
_lowercase : Union[str, Any] = None
_lowercase : List[str] = None
_lowercase : Union[str, Any] = None
_lowercase : List[Any] = None
_lowercase : Optional[int] = None
_lowercase : Union[str, Any] = None
import shutil
_lowercase : str = None
_lowercase : Dict = None
_lowercase : List[str] = None
import subprocess
_lowercase : Any = None # type: ignore
_lowercase : Optional[Any] = None
import sys
_lowercase : int = None
_lowercase : Any = None
_lowercase : Any = None
_lowercase : Union[str, Any] = None
_lowercase : Tuple = None
| 710 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __UpperCamelCase ( _lowercase ) -> None:
_lowercase , _lowercase : List[Any] = analyze_text(_lowercase )
_lowercase : Any = list(' ' + ascii_lowercase )
# what is our total sum of probabilities.
_lowercase : Union[str, Any] = sum(single_char_strings.values() )
# one length string
_lowercase : Union[str, Any] = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
_lowercase : Any = single_char_strings[ch]
_lowercase : int = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
# two len string
_lowercase : str = sum(two_char_strings.values() )
_lowercase : str = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
        for chb in my_alphas:
            _lowercase : Optional[Any] = cha + chb
if sequence in two_char_strings:
_lowercase : int = two_char_strings[sequence]
_lowercase : Optional[int] = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def __UpperCamelCase ( _lowercase ) -> tuple[dict, dict]:
_lowercase : Optional[Any] = Counter() # type: ignore
_lowercase : List[Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0, len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __UpperCamelCase ( ) -> List[Any]:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 4 | 0 |
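The routine above computes Shannon entropy H = -sum(p * log2 p) over character frequencies. The first-order case in a few standalone lines:

import math
from collections import Counter

def shannon_entropy(text: str) -> float:
    counts = Counter(text)
    total = sum(counts.values())
    return -sum((c / total) * math.log2(c / total) for c in counts.values())

print(round(shannon_entropy("abab"), 1))  # 1.0 bit per symbol (two equiprobable chars)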
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
_A : Any = logging.getLogger(__name__)
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : Tuple=-1 ) -> Optional[int]:
'''simple docstring'''
_lowercase : int = label_idx
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str ) -> List[InputExample]:
'''simple docstring'''
if isinstance(__a , __a ):
_lowercase : List[Any] = mode.value
_lowercase : Dict = os.path.join(__a , F'''{mode}.txt''' )
_lowercase : Union[str, Any] = 1
_lowercase : Optional[Any] = []
with open(__a , encoding='utf-8' ) as f:
_lowercase : str = []
_lowercase : int = []
for line in f:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=__a , labels=__a ) )
guid_index += 1
_lowercase : Union[str, Any] = []
_lowercase : Any = []
else:
_lowercase : str = line.split(' ' )
words.append(splits[0] )
if len(__a ) > 1:
labels.append(splits[self.label_idx].replace('\n' , '' ) )
else:
# Examples could have no label for mode = "test"
labels.append('O' )
if words:
examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=__a , labels=__a ) )
return examples
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] ) -> List[str]:
'''simple docstring'''
_lowercase : Union[str, Any] = 0
for line in test_input_reader:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
writer.write(__a )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
_lowercase : Dict = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
writer.write(__a )
else:
logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
if path:
with open(__a , 'r' ) as f:
_lowercase : Any = f.read().splitlines()
if "O" not in labels:
_lowercase : Any = ['O'] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : Optional[Any] ) -> str:
'''simple docstring'''
super().__init__(label_idx=-2 )
def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : str ) -> List[str]:
'''simple docstring'''
if path:
with open(__a , 'r' ) as f:
_lowercase : Optional[int] = f.read().splitlines()
if "O" not in labels:
_lowercase : Any = ['O'] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : str ) -> List[InputExample]:
'''simple docstring'''
if isinstance(__a , __a ):
_lowercase : int = mode.value
_lowercase : List[Any] = os.path.join(__a , F'''{mode}.txt''' )
_lowercase : Optional[int] = 1
_lowercase : Tuple = []
with open(__a , encoding='utf-8' ) as f:
for sentence in parse_incr(__a ):
_lowercase : Optional[Any] = []
_lowercase : Any = []
for token in sentence:
words.append(token['form'] )
labels.append(token['upos'] )
assert len(__a ) == len(__a )
if words:
examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=__a , labels=__a ) )
guid_index += 1
return examples
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Optional[int] = 0
for sentence in parse_incr(__a ):
_lowercase : Optional[int] = preds_list[example_id]
_lowercase : Union[str, Any] = ''
for token in sentence:
out += F'''{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '''
out += "\n"
writer.write(__a )
example_id += 1
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : List[str] ) -> List[str]:
'''simple docstring'''
if path:
with open(__a , 'r' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 711 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : List[Any] = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
_lowercase : str = AutoTokenizer.from_pretrained('xlm-roberta-base' )
_lowercase : List[Any] = 'The dog is cute and lives in the garden house'
_lowercase : Optional[int] = jnp.array([tokenizer.encode(UpperCamelCase_ )] )
_lowercase : int = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
_lowercase : Tuple = jnp.array(
[[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
_lowercase : List[str] = model(UpperCamelCase_ )['last_hidden_state']
self.assertEqual(output.shape , UpperCamelCase_ )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , UpperCamelCase_ , atol=1E-3 ) )
| 4 | 0 |
'''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
_A : Any =logging.get_logger(__name__)
_A : Dict ='''T5Config'''
class lowerCamelCase__ ( TFTaModel ):
'''simple docstring'''
A_ = """mt5"""
A_ = MTaConfig
class lowerCamelCase__ ( TFTaForConditionalGeneration ):
'''simple docstring'''
A_ = """mt5"""
A_ = MTaConfig
class lowerCamelCase__ ( TFTaEncoderModel ):
'''simple docstring'''
A_ = """mt5"""
A_ = MTaConfig
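# These are thin wrappers: mT5 reuses the T5 architecture unchanged, so each
# class only overrides `model_type` and `config_class`. Upstream they are
# TFMT5Model, TFMT5ForConditionalGeneration and TFMT5EncoderModel, loaded the
# usual way (checkpoint name is just an example):
#
#   model = TFMT5ForConditionalGeneration.from_pretrained('google/mt5-small')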
| 712 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_A : int =logging.get_logger(__name__)
_A : Union[str, Any] ={
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class lowerCamelCase__ ( PretrainedConfig ):
'''simple docstring'''
A_ = """instructblip_vision_model"""
def __init__( self : Union[str, Any] , UpperCamelCase_ : str=1408 , UpperCamelCase_ : Tuple=6144 , UpperCamelCase_ : Union[str, Any]=39 , UpperCamelCase_ : Optional[Any]=16 , UpperCamelCase_ : str=224 , UpperCamelCase_ : Dict=14 , UpperCamelCase_ : Dict="gelu" , UpperCamelCase_ : int=1E-6 , UpperCamelCase_ : int=0.0 , UpperCamelCase_ : List[str]=1E-10 , UpperCamelCase_ : str=True , **UpperCamelCase_ : Dict , ) -> Any:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
_lowercase : Optional[Any] = hidden_size
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[int] = num_hidden_layers
_lowercase : str = num_attention_heads
_lowercase : Tuple = patch_size
_lowercase : Dict = image_size
_lowercase : Optional[int] = initializer_range
_lowercase : List[Any] = attention_dropout
_lowercase : int = layer_norm_eps
_lowercase : Optional[int] = hidden_act
_lowercase : str = qkv_bias
@classmethod
def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase_ )
_lowercase , _lowercase : Tuple = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
_lowercase : Any = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class lowerCamelCase__ ( PretrainedConfig ):
'''simple docstring'''
A_ = """instructblip_qformer"""
def __init__( self : Tuple , UpperCamelCase_ : Union[str, Any]=3_0522 , UpperCamelCase_ : Union[str, Any]=768 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Optional[Any]=12 , UpperCamelCase_ : List[str]=3072 , UpperCamelCase_ : List[str]="gelu" , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : List[Any]=1E-12 , UpperCamelCase_ : Optional[Any]=0 , UpperCamelCase_ : str="absolute" , UpperCamelCase_ : List[Any]=2 , UpperCamelCase_ : Any=1408 , **UpperCamelCase_ : Dict , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Dict = vocab_size
_lowercase : Optional[Any] = hidden_size
_lowercase : Any = num_hidden_layers
_lowercase : List[Any] = num_attention_heads
_lowercase : Optional[int] = hidden_act
_lowercase : Union[str, Any] = intermediate_size
_lowercase : List[Any] = hidden_dropout_prob
_lowercase : Dict = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : Optional[int] = initializer_range
_lowercase : Tuple = layer_norm_eps
_lowercase : List[str] = position_embedding_type
_lowercase : str = cross_attention_frequency
_lowercase : int = encoder_hidden_size
@classmethod
def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase_ )
_lowercase , _lowercase : List[str] = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
_lowercase : Optional[int] = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class lowerCamelCase__ ( PretrainedConfig ):
'''simple docstring'''
A_ = """instructblip"""
A_ = True
def __init__( self : Any , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=32 , **UpperCamelCase_ : int ) -> List[str]:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
if vision_config is None:
_lowercase : Any = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
_lowercase : List[Any] = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
_lowercase : List[Any] = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
_lowercase : List[Any] = InstructBlipVisionConfig(**UpperCamelCase_ )
_lowercase : Union[str, Any] = InstructBlipQFormerConfig(**UpperCamelCase_ )
_lowercase : Union[str, Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
_lowercase : int = CONFIG_MAPPING[text_model_type](**UpperCamelCase_ )
_lowercase : str = self.text_config.tie_word_embeddings
_lowercase : int = self.text_config.is_encoder_decoder
_lowercase : Tuple = num_query_tokens
_lowercase : str = self.vision_config.hidden_size
_lowercase : Union[str, Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowercase : List[Any] = 1.0
_lowercase : int = 0.02
@classmethod
def __UpperCAmelCase ( cls : Tuple , UpperCamelCase_ : InstructBlipVisionConfig , UpperCamelCase_ : InstructBlipQFormerConfig , UpperCamelCase_ : PretrainedConfig , **UpperCamelCase_ : Dict , ) -> List[str]:
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCamelCase_ , )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
'''simple docstring'''
_lowercase : List[Any] = copy.deepcopy(self.__dict__ )
_lowercase : Optional[int] = self.vision_config.to_dict()
_lowercase : Optional[Any] = self.qformer_config.to_dict()
_lowercase : Tuple = self.text_config.to_dict()
_lowercase : Dict = self.__class__.model_type
return output
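# Illustrative construction sketch (upstream the composite class is
# `InstructBlipConfig` and the classmethod above is
# `from_vision_qformer_text_configs`; all sub-configs below use defaults):
#
#   vision_config = InstructBlipVisionConfig()
#   qformer_config = InstructBlipQFormerConfig()
#   text_config = CONFIG_MAPPING['opt']()
#   config = InstructBlipConfig.from_vision_qformer_text_configs(
#       vision_config, qformer_config, text_config)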
| 4 | 0 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowerCamelCase__ ( AbstractDatasetReader ):
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase_ : pyspark.sql.DataFrame , UpperCamelCase_ : Optional[NamedSplit] = None , UpperCamelCase_ : Optional[Features] = None , UpperCamelCase_ : bool = True , UpperCamelCase_ : str = None , UpperCamelCase_ : bool = False , UpperCamelCase_ : str = None , UpperCamelCase_ : bool = True , UpperCamelCase_ : str = "arrow" , **UpperCamelCase_ : str , ) -> int:
'''simple docstring'''
super().__init__(
split=UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , **UpperCamelCase_ , )
_lowercase : Any = load_from_cache_file
_lowercase : List[Any] = file_format
_lowercase : int = Spark(
df=UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , working_dir=UpperCamelCase_ , **UpperCamelCase_ , )
def __UpperCAmelCase ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
_lowercase : Optional[int] = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=UpperCamelCase_ , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
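# Illustrative usage sketch (upstream this reader is `SparkDatasetReader`;
# the Spark session and data below are assumptions):
#
#   from pyspark.sql import SparkSession
#   spark = SparkSession.builder.master('local[*]').getOrCreate()
#   df = spark.createDataFrame([('hello',), ('world',)], ['text'])
#   ds = SparkDatasetReader(df, cache_dir='/tmp/hf_cache').read()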
| 713 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_A : List[str] ='''pt'''
elif is_tf_available():
_A : Tuple ='''tf'''
else:
_A : Optional[int] ='''jax'''
class lowerCamelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
A_ = ByTaTokenizer
A_ = False
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
_lowercase : Any = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def __UpperCAmelCase ( self : int , **UpperCamelCase_ : List[Any] ) -> ByTaTokenizer:
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Tuple=20 , UpperCamelCase_ : Optional[int]=5 ) -> Tuple[str, list]:
'''simple docstring'''
_lowercase : Dict = []
for i in range(len(UpperCamelCase_ ) ):
try:
_lowercase : List[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_lowercase : Optional[Any] = list(filter(lambda UpperCamelCase_ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , UpperCamelCase_ ) )
_lowercase : List[Any] = list(filter(lambda UpperCamelCase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase_ ) , UpperCamelCase_ ) )
if max_length is not None and len(UpperCamelCase_ ) > max_length:
_lowercase : List[Any] = toks[:max_length]
if min_length is not None and len(UpperCamelCase_ ) < min_length and len(UpperCamelCase_ ) > 0:
while len(UpperCamelCase_ ) < min_length:
_lowercase : Tuple = toks + toks
# toks_str = [t[1] for t in toks]
_lowercase : Dict = [t[0] for t in toks]
# Ensure consistency
_lowercase : Any = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
if " " not in output_txt and len(UpperCamelCase_ ) > 1:
_lowercase : Any = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase_ )
)
if with_prefix_space:
_lowercase : Union[str, Any] = ' ' + output_txt
_lowercase : int = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
return output_txt, output_ids
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
_lowercase : List[str] = self.ta_base_tokenizer
_lowercase : Union[str, Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
_lowercase : Tuple = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Optional[int] = self.ta_base_tokenizer
_lowercase : Tuple = 'Unicode €.'
_lowercase : List[Any] = tokenizer(UpperCamelCase_ )
_lowercase : List[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
# decoding
_lowercase : List[str] = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , 'Unicode €.</s>' )
_lowercase : Any = tokenizer('e è é ê ë' )
_lowercase : Optional[int] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
# decoding
_lowercase : Tuple = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
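# Note: ByT5 tokenizes raw UTF-8 bytes; each id above is the byte value plus
# an offset of 3 reserved for special tokens (pad=0, eos=1, unk=2). For
# example ord('U') == 85 maps to id 88, and the space byte 32 maps to id 35.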
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
'''simple docstring'''
_lowercase : List[Any] = self.ta_base_tokenizer
_lowercase : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_lowercase : Any = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_lowercase : Dict = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
if FRAMEWORK != "jax":
_lowercase : Optional[Any] = list(batch.input_ids.numpy()[0] )
else:
_lowercase : List[str] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def __UpperCAmelCase ( self : Optional[int] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = self.ta_base_tokenizer
_lowercase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowercase : str = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , UpperCamelCase_ )
self.assertIn('attention_mask' , UpperCamelCase_ )
self.assertNotIn('decoder_input_ids' , UpperCamelCase_ )
self.assertNotIn('decoder_attention_mask' , UpperCamelCase_ )
def __UpperCAmelCase ( self : Any ) -> int:
'''simple docstring'''
_lowercase : Tuple = self.ta_base_tokenizer
_lowercase : Optional[Any] = [
'Summary of the text.',
'Another summary.',
]
_lowercase : str = tokenizer(
text_target=UpperCamelCase_ , max_length=32 , padding='max_length' , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def __UpperCAmelCase ( self : Dict ) -> Tuple:
'''simple docstring'''
_lowercase : str = self.ta_base_tokenizer
_lowercase : str = ['A long paragraph for summarization. </s>']
_lowercase : Optional[int] = ['Summary of the text. </s>']
# fmt: off
_lowercase : Optional[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_lowercase : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_lowercase : Any = tokenizer(UpperCamelCase_ , text_target=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , batch['input_ids'][0] )
self.assertEqual(UpperCamelCase_ , batch['labels'][0] )
def __UpperCAmelCase ( self : List[str] ) -> int:
'''simple docstring'''
_lowercase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_lowercase : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : List[Any] = tempfile.mkdtemp()
_lowercase : Any = ' He is very happy, UNwant\u00E9d,running'
_lowercase : Union[str, Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowercase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
_lowercase : Tuple = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
shutil.rmtree(UpperCamelCase_ )
_lowercase : str = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : Dict = tempfile.mkdtemp()
_lowercase : List[Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_lowercase : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_lowercase : Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowercase : List[str] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
_lowercase : Dict = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_lowercase : Dict = tokenizer.__class__.from_pretrained(UpperCamelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCamelCase_ )
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
'''simple docstring'''
_lowercase : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_lowercase : int = json.load(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_lowercase : Tuple = json.load(UpperCamelCase_ )
_lowercase : List[Any] = [F'''<extra_id_{i}>''' for i in range(125 )]
_lowercase : Any = added_tokens_extra_ids + [
'an_additional_special_token'
]
_lowercase : int = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_lowercase : Optional[Any] = tokenizer_class.from_pretrained(
UpperCamelCase_ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_lowercase : List[str] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=UpperCamelCase_ )]
_lowercase : Tuple = tokenizer_class.from_pretrained(
UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def __UpperCAmelCase ( self : List[str] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_ )
_lowercase : str = tokenizer_class.from_pretrained(UpperCamelCase_ )
self.assertTrue(tokenizer.decode([255] ) == '' )
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : str ) -> Tuple:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = self.get_tokenizers(fast=UpperCamelCase_ , do_lower_case=UpperCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowercase : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
_lowercase : Tuple = tokenizer.convert_tokens_to_string(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
_lowercase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowercase : Optional[int] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
_lowercase : Optional[int] = 0
_lowercase : int = tokenizer.convert_ids_to_tokens(
UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
for attr in attributes_list:
setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [] )
setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
| 4 | 0 |
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class lowerCamelCase__ ( pl.LightningModule ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase_ : List[Any] ) -> str:
'''simple docstring'''
super().__init__()
_lowercase : int = model
_lowercase : Optional[int] = 2
_lowercase : str = nn.Linear(self.model.config.hidden_size , self.num_labels )
def __UpperCAmelCase ( self : Dict ) -> Tuple:
'''simple docstring'''
pass
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> int:
_lowercase : str = LongformerModel.from_pretrained(lowerCamelCase__ )
_lowercase : int = LightningModel(lowerCamelCase__ )
_lowercase : Dict = torch.load(lowerCamelCase__, map_location=torch.device('cpu' ) )
lightning_model.load_state_dict(ckpt['state_dict'] )
# init longformer question answering model
_lowercase : Tuple = LongformerForQuestionAnswering.from_pretrained(lowerCamelCase__ )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(lowerCamelCase__ )
print(f'''Conversion successful. Model saved under {pytorch_dump_folder_path}''' )
if __name__ == "__main__":
_A : Tuple =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_A : List[str] =parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
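# Example invocation (script name and paths are placeholders):
#
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./lightning_ckpt.ckpt \
#       --pytorch_dump_folder_path ./longformer-base-4096-qa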
| 714 |
'''simple docstring'''
_A : Dict ='''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_A : Dict =[{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_A : Dict ={
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 4 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
_A : Dict =logging.get_logger(__name__)
_A : List[str] ={
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class lowerCamelCase__ ( PretrainedConfig ):
'''simple docstring'''
A_ = """longformer"""
def __init__( self : Dict , UpperCamelCase_ : Union[List[int], int] = 512 , UpperCamelCase_ : int = 2 , UpperCamelCase_ : int = 1 , UpperCamelCase_ : int = 0 , UpperCamelCase_ : int = 2 , UpperCamelCase_ : int = 3_0522 , UpperCamelCase_ : int = 768 , UpperCamelCase_ : int = 12 , UpperCamelCase_ : int = 12 , UpperCamelCase_ : int = 3072 , UpperCamelCase_ : str = "gelu" , UpperCamelCase_ : float = 0.1 , UpperCamelCase_ : float = 0.1 , UpperCamelCase_ : int = 512 , UpperCamelCase_ : int = 2 , UpperCamelCase_ : float = 0.02 , UpperCamelCase_ : float = 1E-12 , UpperCamelCase_ : bool = False , **UpperCamelCase_ : Optional[Any] , ) -> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=lowercase_ , **lowercase_ )
_lowercase : List[Any] = attention_window
_lowercase : Optional[int] = sep_token_id
_lowercase : Union[str, Any] = bos_token_id
_lowercase : Tuple = eos_token_id
_lowercase : Optional[int] = vocab_size
_lowercase : Optional[int] = hidden_size
_lowercase : Tuple = num_hidden_layers
_lowercase : Dict = num_attention_heads
_lowercase : Union[str, Any] = hidden_act
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[Any] = hidden_dropout_prob
_lowercase : Dict = attention_probs_dropout_prob
_lowercase : Optional[int] = max_position_embeddings
_lowercase : Union[str, Any] = type_vocab_size
_lowercase : Union[str, Any] = initializer_range
_lowercase : List[str] = layer_norm_eps
_lowercase : str = onnx_export
class lowerCamelCase__ ( OnnxConfig ):
'''simple docstring'''
def __init__( self : str , UpperCamelCase_ : "PretrainedConfig" , UpperCamelCase_ : str = "default" , UpperCamelCase_ : "List[PatchingSpec]" = None ) -> Any:
'''simple docstring'''
super().__init__(lowercase_ , lowercase_ , lowercase_ )
_lowercase : str = True
@property
def __UpperCAmelCase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
_lowercase : List[str] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowercase : Any = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
def __UpperCAmelCase ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
_lowercase : List[str] = super().outputs
if self.task == "default":
_lowercase : Any = {0: 'batch'}
return outputs
@property
def __UpperCAmelCase ( self : Optional[Any] ) -> float:
'''simple docstring'''
return 1E-4
@property
def __UpperCAmelCase ( self : Any ) -> int:
'''simple docstring'''
return max(super().default_onnx_opset , 14 )
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : "PreTrainedTokenizerBase" , UpperCamelCase_ : int = -1 , UpperCamelCase_ : int = -1 , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
_lowercase : Dict = super().generate_dummy_inputs(
preprocessor=lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
_lowercase : List[Any] = torch.zeros_like(inputs['input_ids'] )
# make every second token global
inputs['global_attention_mask'][:, ::2] = 1
return inputs
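# Illustrative export sketch using this ONNX config (model and paths are
# assumptions; `transformers.onnx.export` is the helper that consumed
# configs of this style):
#
#   from pathlib import Path
#   from transformers import AutoTokenizer, LongformerModel
#   from transformers.onnx import export
#
#   tokenizer = AutoTokenizer.from_pretrained('allenai/longformer-base-4096')
#   model = LongformerModel.from_pretrained('allenai/longformer-base-4096')
#   onnx_config = LongformerOnnxConfig(model.config)  # i.e. the class above
#   export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset,
#          Path('longformer.onnx'))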
| 715 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def __UpperCamelCase ( _lowercase ) -> Tuple:
_lowercase : int = torch.exp(_lowercase )
_lowercase : List[str] = torch.sum(_lowercase, dim=1 ) # sum of exp(x_i)
_lowercase : str = torch.sum(x * exp_x, dim=1 ) # sum of x_i * exp(x_i)
return torch.log(_lowercase ) - B / A
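# The function above computes the Shannon entropy of softmax(x) row-wise:
# with A = sum_i exp(x_i) and B = sum_i x_i * exp(x_i), it returns
# H(softmax(x)) = log(A) - B / A. Minimal sanity-check sketch (illustrative
# only; `entropy` is the name this function is invoked by further down):
#
#   x = torch.randn(2, 5)
#   p = torch.softmax(x, dim=1)
#   assert torch.allclose(entropy(x), -(p * p.log()).sum(dim=1), atol=1e-5)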
class lowerCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase_ : List[str] ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
_lowercase : int = config.output_attentions
_lowercase : int = config.output_hidden_states
_lowercase : Union[str, Any] = nn.ModuleList([BertLayer(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] )
_lowercase : List[Any] = nn.ModuleList([BertHighway(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] )
_lowercase : Tuple = [-1 for _ in range(config.num_hidden_layers )]
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : str ) -> int:
'''simple docstring'''
if (type(UpperCamelCase_ ) is float) or (type(UpperCamelCase_ ) is int):
for i in range(len(self.early_exit_entropy ) ):
_lowercase : Optional[Any] = x
else:
_lowercase : Optional[int] = x
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : List[Any] ) -> Dict:
'''simple docstring'''
_lowercase : Optional[int] = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def __UpperCAmelCase ( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[Any]=None , ) -> Optional[int]:
'''simple docstring'''
_lowercase : int = ()
_lowercase : List[Any] = ()
_lowercase : Tuple = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_lowercase : Optional[int] = all_hidden_states + (hidden_states,)
_lowercase : str = layer_module(
UpperCamelCase_ , UpperCamelCase_ , head_mask[i] , UpperCamelCase_ , UpperCamelCase_ )
_lowercase : List[str] = layer_outputs[0]
if self.output_attentions:
_lowercase : Tuple = all_attentions + (layer_outputs[1],)
_lowercase : Optional[int] = (hidden_states,)
if self.output_hidden_states:
_lowercase : str = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowercase : Optional[int] = current_outputs + (all_attentions,)
_lowercase : List[Any] = self.highway[i](UpperCamelCase_ )
# logits, pooled_output
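# At inference time, if a layer's highway head is confident enough (its
# softmax entropy falls below that layer's threshold), a HighwayException is
# raised to abort the remaining layers; the caller catches it and uses the
# early logits carried inside the exception.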
if not self.training:
_lowercase : Dict = highway_exit[0]
_lowercase : Tuple = entropy(UpperCamelCase_ )
_lowercase : Dict = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowercase : str = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
_lowercase : Tuple = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(UpperCamelCase_ , i + 1 )
else:
_lowercase : Optional[int] = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowercase : str = all_hidden_states + (hidden_states,)
_lowercase : Optional[Any] = (hidden_states,)
if self.output_hidden_states:
_lowercase : Dict = outputs + (all_hidden_states,)
if self.output_attentions:
_lowercase : Optional[Any] = outputs + (all_attentions,)
_lowercase : Optional[int] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"""The Bert Model transformer with early exiting (DeeBERT). """ , A , )
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
super().__init__(UpperCamelCase_ )
_lowercase : int = config
_lowercase : int = BertEmbeddings(UpperCamelCase_ )
_lowercase : List[Any] = DeeBertEncoder(UpperCamelCase_ )
_lowercase : Any = BertPooler(UpperCamelCase_ )
self.init_weights()
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
return self.embeddings.word_embeddings
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict ) -> Any:
'''simple docstring'''
_lowercase : Optional[Any] = value
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> Union[str, Any]:
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(UpperCamelCase_ )
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : int=None , UpperCamelCase_ : Tuple=None , ) -> Union[str, Any]:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
_lowercase : Any = input_ids.size()
elif inputs_embeds is not None:
_lowercase : Any = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
_lowercase : str = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowercase : Tuple = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
if encoder_attention_mask is None:
_lowercase : Dict = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
if token_type_ids is None:
_lowercase : int = torch.zeros(UpperCamelCase_ , dtype=torch.long , device=UpperCamelCase_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowercase : torch.Tensor = self.get_extended_attention_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowercase : int = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowercase : int = encoder_attention_mask[:, None, None, :]
_lowercase : str = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
_lowercase : Optional[int] = (1.0 - encoder_extended_attention_mask) * -1_00_00.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowercase : Optional[int] = self.get_head_mask(UpperCamelCase_ , self.config.num_hidden_layers )
_lowercase : Dict = self.embeddings(
input_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ )
_lowercase : List[Any] = self.encoder(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )
_lowercase : int = encoder_outputs[0]
_lowercase : str = self.pooler(UpperCamelCase_ )
_lowercase : List[Any] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class lowerCamelCase__ ( Exception ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Any = message
_lowercase : Dict = exit_layer # start from 1!
class lowerCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : List[str] ) -> Dict:
'''simple docstring'''
super().__init__()
_lowercase : Optional[Any] = BertPooler(UpperCamelCase_ )
_lowercase : List[Any] = nn.Dropout(config.hidden_dropout_prob )
_lowercase : int = nn.Linear(config.hidden_size , config.num_labels )
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Optional[int] ) -> List[Any]:
'''simple docstring'''
_lowercase : str = encoder_outputs[0]
_lowercase : int = self.pooler(UpperCamelCase_ )
# "return" pooler_output
# BertModel
_lowercase : Optional[int] = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
_lowercase : Dict = bmodel_output[1]
_lowercase : Union[str, Any] = self.dropout(UpperCamelCase_ )
_lowercase : str = self.classifier(UpperCamelCase_ )
return logits, pooled_output
@add_start_docstrings(
"""Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. """ , BERT_START_DOCSTRING , )
class lowerCamelCase__ ( BertPreTrainedModel ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : List[Any] ) -> List[str]:
'''simple docstring'''
super().__init__(UpperCamelCase_ )
_lowercase : Dict = config.num_labels
_lowercase : Any = config.num_hidden_layers
_lowercase : Optional[int] = DeeBertModel(UpperCamelCase_ )
_lowercase : Any = nn.Dropout(config.hidden_dropout_prob )
_lowercase : Optional[Any] = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : str=-1 , UpperCamelCase_ : Union[str, Any]=False , ) -> Tuple:
'''simple docstring'''
_lowercase : Union[str, Any] = self.num_layers
try:
_lowercase : Tuple = self.bert(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_lowercase : List[Any] = outputs[1]
_lowercase : int = self.dropout(UpperCamelCase_ )
_lowercase : Optional[int] = self.classifier(UpperCamelCase_ )
_lowercase : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowercase : Union[str, Any] = e.message
_lowercase : Any = e.exit_layer
_lowercase : Optional[int] = outputs[0]
if not self.training:
_lowercase : Union[str, Any] = entropy(UpperCamelCase_ )
_lowercase : Tuple = []
_lowercase : Tuple = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowercase : Tuple = MSELoss()
_lowercase : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_lowercase : Union[str, Any] = CrossEntropyLoss()
_lowercase : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_lowercase : Optional[Any] = []
for highway_exit in outputs[-1]:
_lowercase : Optional[Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(UpperCamelCase_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_lowercase : Union[str, Any] = MSELoss()
_lowercase : Any = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_lowercase : Dict = CrossEntropyLoss()
_lowercase : Optional[int] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(UpperCamelCase_ )
if train_highway:
_lowercase : List[str] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_lowercase : Optional[Any] = (loss,) + outputs
if not self.training:
_lowercase : List[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowercase : Dict = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
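# Illustrative inference sketch (upstream this classifier is
# `DeeBertForSequenceClassification` and the threshold setter on the encoder
# is `set_early_exit_entropy`; the checkpoint path is a placeholder):
#
#   model = DeeBertForSequenceClassification.from_pretrained('path/to/deebert')
#   model.bert.encoder.set_early_exit_entropy(0.5)  # exit once a layer is confident
#   model.eval()
#   outputs = model(input_ids)  # may return from an intermediate layer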
| 4 | 0 |
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any=13 , UpperCamelCase_ : List[Any]=7 , UpperCamelCase_ : Any=True , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Any=True , UpperCamelCase_ : Any=True , UpperCamelCase_ : Any=99 , UpperCamelCase_ : List[str]=32 , UpperCamelCase_ : Optional[int]=5 , UpperCamelCase_ : List[Any]=4 , UpperCamelCase_ : Tuple=37 , UpperCamelCase_ : Dict="gelu" , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : str=0.1 , UpperCamelCase_ : Union[str, Any]=512 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : str=2 , UpperCamelCase_ : Optional[Any]=0.02 , UpperCamelCase_ : int=False , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Optional[int]="None" , UpperCamelCase_ : Optional[Any]=3 , UpperCamelCase_ : Optional[Any]=4 , UpperCamelCase_ : Dict=None , ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Any = parent
_lowercase : str = batch_size
_lowercase : List[Any] = seq_length
_lowercase : Union[str, Any] = is_training
_lowercase : List[str] = use_input_mask
_lowercase : Optional[Any] = use_token_type_ids
_lowercase : int = use_labels
_lowercase : int = vocab_size
_lowercase : Any = hidden_size
_lowercase : List[Any] = num_hidden_layers
_lowercase : Any = num_attention_heads
_lowercase : Any = intermediate_size
_lowercase : int = hidden_act
_lowercase : int = hidden_dropout_prob
_lowercase : List[str] = attention_probs_dropout_prob
_lowercase : str = max_position_embeddings
_lowercase : int = type_vocab_size
_lowercase : Any = type_sequence_label_size
_lowercase : Union[str, Any] = initializer_range
_lowercase : str = num_labels
_lowercase : int = num_choices
_lowercase : List[Any] = relative_attention
_lowercase : Any = position_biased_input
_lowercase : Any = pos_att_type
_lowercase : Dict = scope
def __UpperCAmelCase ( self : Any ) -> Dict:
'''simple docstring'''
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : Any = None
if self.use_input_mask:
_lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_lowercase : Optional[Any] = None
if self.use_token_type_ids:
_lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : List[Any] = None
_lowercase : List[str] = None
_lowercase : Any = None
if self.use_labels:
_lowercase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
_lowercase : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Any ) -> Any:
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __UpperCAmelCase ( self : str , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int] ) -> List[Any]:
'''simple docstring'''
_lowercase : Any = DebertaVaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_lowercase : List[str] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
_lowercase : Any = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
_lowercase : Optional[Any] = model(lowerCAmelCase_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple ) -> List[str]:
'''simple docstring'''
_lowercase : Optional[Any] = DebertaVaForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_lowercase : Optional[int] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Union[str, Any] ) -> Any:
'''simple docstring'''
_lowercase : Optional[int] = self.num_labels
_lowercase : Dict = DebertaVaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_lowercase : Optional[int] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowerCAmelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] ) -> str:
'''simple docstring'''
_lowercase : Optional[Any] = self.num_labels
_lowercase : Dict = DebertaVaForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_lowercase : Union[str, Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_lowercase : int = DebertaVaForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_lowercase : Optional[Any] = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Dict , UpperCamelCase_ : Any ) -> str:
'''simple docstring'''
_lowercase : Optional[Any] = DebertaVaForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_lowercase : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowercase : str = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowercase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowercase : Union[str, Any] = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCAmelCase ( self : Any ) -> Optional[int]:
'''simple docstring'''
_lowercase : Union[str, Any] = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Optional[int] = config_and_inputs
_lowercase : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
A_ = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
A_ = (
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ = True
A_ = False
A_ = False
A_ = False
A_ = False
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_lowercase : Tuple = DebertaVaModelTester(self )
_lowercase : Any = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : int ) -> List[str]:
'''simple docstring'''
_lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ )
def __UpperCAmelCase ( self : Dict ) -> Dict:
'''simple docstring'''
_lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
'''simple docstring'''
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ )
def __UpperCAmelCase ( self : Tuple ) -> int:
'''simple docstring'''
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ )
def __UpperCAmelCase ( self : int ) -> Dict:
'''simple docstring'''
_lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCAmelCase_ )
@slow
def __UpperCAmelCase ( self : Dict ) -> Any:
'''simple docstring'''
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Optional[int] = DebertaVaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason='Model not available yet' )
def __UpperCAmelCase ( self : Any ) -> int:
'''simple docstring'''
pass
@slow
def __UpperCAmelCase ( self : int ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Dict = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
_lowercase : Optional[int] = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
_lowercase : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_lowercase : int = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
# compare the actual values for a slice.
_lowercase : str = torch.tensor(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1E-4 ) , F'''{output[:, 1:4, 1:4]}''' )
| 716 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
    def test_sorted( self ) -> None:
'''simple docstring'''
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit , weight , max_weight ) , 210 )
    def test_negative_max_weight( self ) -> None:
        '''simple docstring'''
        self.assertRaisesRegex(ValueError , 'max_weight must greater than zero.' )
    def test_negative_weight_value( self ) -> None:
        '''simple docstring'''
        self.assertRaisesRegex(ValueError , 'Weight can not be negative.' )
    def test_negative_profit_value( self ) -> None:
        '''simple docstring'''
        self.assertRaisesRegex(ValueError , 'Profit can not be negative.' )
    def test_null_max_weight( self ) -> None:
        '''simple docstring'''
        self.assertRaisesRegex(ValueError , 'max_weight must greater than zero.' )
    def test_unequal_list_length( self ) -> None:
        '''simple docstring'''
        self.assertRaisesRegex(
            IndexError , 'The length of profit and weight must be same.' )
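# For context, a minimal sketch of the fractional greedy strategy these tests
# exercise (an illustrative stand-in, not the shipped `greedy_knapsack` module):
# items are taken in descending profit/weight order, splitting the last one.
def _calc_profit_sketch(profit: list, weight: list, max_weight: int) -> float:
    if len(profit) != len(weight):
        raise IndexError('The length of profit and weight must be same.')
    if max_weight <= 0:
        raise ValueError('max_weight must greater than zero.')
    if any(p < 0 for p in profit):
        raise ValueError('Profit can not be negative.')
    if any(w < 0 for w in weight):
        raise ValueError('Weight can not be negative.')
    gain, capacity = 0.0, max_weight
    for p, w in sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True):
        take = min(w, capacity)  # whole item, or the fraction that still fits
        gain += p * take / w
        capacity -= take
    return gain  # e.g. profits [10..60], weights [2..12], max_weight 100 -> 210.0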
if __name__ == "__main__":
unittest.main()
| 4 | 0 |
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class lowerCamelCase__ :
'''simple docstring'''
@property
def __UpperCAmelCase ( self : Optional[int] ) -> str:
'''simple docstring'''
return self.get_dummy_input()
@property
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def __UpperCAmelCase ( self : int , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Any=False , UpperCamelCase_ : Any=False , ) -> List[Any]:
'''simple docstring'''
_lowercase : Union[str, Any] = 4
_lowercase : List[str] = 32
_lowercase : Tuple = (32, 32)
_lowercase : Any = torch.manual_seed(0 )
_lowercase : Tuple = torch.device(UpperCAmelCase__ )
_lowercase : int = (batch_size, num_channels) + sizes
_lowercase : Tuple = randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=UpperCAmelCase__ )
_lowercase : List[str] = {'''hidden_states''': hidden_states}
if include_temb:
_lowercase : Optional[int] = 128
_lowercase : Optional[Any] = randn_tensor((batch_size, temb_channels) , generator=UpperCAmelCase__ , device=UpperCAmelCase__ )
if include_res_hidden_states_tuple:
_lowercase : Optional[Any] = torch.manual_seed(1 )
_lowercase : int = (randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=UpperCAmelCase__ ),)
if include_encoder_hidden_states:
_lowercase : Dict = floats_tensor((batch_size, 32, 32) ).to(UpperCAmelCase__ )
if include_skip_sample:
_lowercase : List[Any] = randn_tensor(((batch_size, 3) + sizes) , generator=UpperCAmelCase__ , device=UpperCAmelCase__ )
return dummy_input
def __UpperCAmelCase ( self : Dict ) -> Dict:
'''simple docstring'''
_lowercase : Optional[int] = {
'''in_channels''': 32,
'''out_channels''': 32,
'''temb_channels''': 128,
}
if self.block_type == "up":
_lowercase : Union[str, Any] = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
_lowercase : Tuple = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : Dict ) -> int:
'''simple docstring'''
_lowercase : List[Any] = self.prepare_init_args_and_inputs_for_common()
_lowercase : List[str] = self.block_class(**UpperCAmelCase__ )
unet_block.to(UpperCAmelCase__ )
unet_block.eval()
with torch.no_grad():
_lowercase : Optional[int] = unet_block(**UpperCAmelCase__ )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
_lowercase : Optional[Any] = output[0]
self.assertEqual(output.shape , self.output_shape )
_lowercase : List[str] = output[0, -1, -3:, -3:]
_lowercase : Optional[Any] = torch.tensor(UpperCAmelCase__ ).to(UpperCAmelCase__ )
assert torch_all_close(output_slice.flatten() , UpperCAmelCase__ , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
_lowercase : Any = self.prepare_init_args_and_inputs_for_common()
_lowercase : Optional[int] = self.block_class(**UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.train()
_lowercase : int = model(**UpperCAmelCase__ )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
_lowercase : Optional[int] = output[0]
_lowercase : Dict = torch.device(UpperCAmelCase__ )
_lowercase : Tuple = randn_tensor(output.shape , device=UpperCAmelCase__ )
_lowercase : Union[str, Any] = torch.nn.functional.mse_loss(UpperCAmelCase__ , UpperCAmelCase__ )
loss.backward()
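# Hypothetical example of how a concrete test plugs into the mixin above: a
# subclass only needs to pick `block_class` and `block_type` (names assumed),
# and inherits the dummy-input plumbing plus the shape, slice and training checks.
#
#   class DownBlockaDTests(lowerCamelCase__, unittest.TestCase):
#       block_class = DownBlockaD   # assumed import from diffusers.models
#       block_type = 'down'         # selects the (4, 32, 16, 16) output shape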
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_A : Optional[Any] ={'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Tuple =['''XLNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str =['''XLNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Any =[
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str =[
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
_A : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
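# Usage note: thanks to the `_LazyModule` indirection above, `import transformers`
# stays cheap; the torch- or TF-backed classes are only imported on first access:
#
#   from transformers import XLNetModel                       # resolved lazily
#   model = XLNetModel.from_pretrained('xlnet-base-cased')    # checkpoint name assumed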
| 4 | 0 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
_A : List[Any] =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotObjectDetectionPipeline( ChunkPipeline ):
'''simple docstring'''
    def __init__( self , **kwargs ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        if self.framework == "tf":
            raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
        requires_backends(self , 'vision' )
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING )
    def __call__( self , image , candidate_labels=None , **kwargs ) -> Any:
        '''simple docstring'''
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop('text_queries' )
        if isinstance(image , (str, Image.Image) ):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results
    def _sanitize_parameters( self , **kwargs ) -> tuple:
        '''simple docstring'''
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params
    def preprocess( self , inputs ) -> Any:
        '''simple docstring'''
        image = load_image(inputs['image'] )
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels , str ):
            candidate_labels = candidate_labels.split(',' )
        target_size = torch.tensor([[image.height, image.width]] , dtype=torch.int32 )
        for i, candidate_label in enumerate(candidate_labels ):
            text_inputs = self.tokenizer(candidate_label , return_tensors=self.framework )
            image_features = self.image_processor(image , return_tensors=self.framework )
            yield {
                "is_last": i == len(candidate_labels ) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward( self , model_inputs ) -> dict:
        '''simple docstring'''
        target_size = model_inputs.pop('target_size' )
        candidate_label = model_inputs.pop('candidate_label' )
        is_last = model_inputs.pop('is_last' )
        outputs = self.model(**model_inputs )
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs
    def postprocess( self , model_outputs , threshold=0.1 , top_k=None ) -> list:
        '''simple docstring'''
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output )
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output , threshold=threshold , target_sizes=model_output['target_size'] )[0]
            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs['boxes'][index][0] )
                result = {"score": score, "label": label, "box": box}
                results.append(result )
        results = sorted(results , key=lambda x : x["score"] , reverse=True )
        if top_k:
            results = results[:top_k]
        return results
    def _get_bounding_box( self , box ) -> dict:
'''simple docstring'''
if self.framework != "pt":
raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.' )
        xmin , ymin , xmax , ymax = box.int().tolist()
        bbox = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
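# Hedged usage sketch for the pipeline defined above (checkpoint name assumed):
#
#   from transformers import pipeline
#   detector = pipeline('zero-shot-object-detection', model='google/owlvit-base-patch32')
#   detector('http://images.cocodataset.org/val2017/000000039769.jpg',
#            candidate_labels=['cat', 'remote control'])
#   # -> [{'score': ..., 'label': 'cat', 'box': {'xmin': ..., 'ymin': ..., ...}}, ...]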
| 718 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Optional[Any] =logging.get_logger(__name__)
_A : Optional[int] ={
    '''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
    '''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """markuplm"""
def __init__( self : int , UpperCamelCase_ : Optional[Any]=3_0522 , UpperCamelCase_ : Optional[Any]=768 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Union[str, Any]=12 , UpperCamelCase_ : Tuple=3072 , UpperCamelCase_ : Union[str, Any]="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Dict=512 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Any=0.02 , UpperCamelCase_ : Optional[Any]=1E-12 , UpperCamelCase_ : List[str]=0 , UpperCamelCase_ : Optional[int]=0 , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : str=256 , UpperCamelCase_ : Optional[Any]=1024 , UpperCamelCase_ : Union[str, Any]=216 , UpperCamelCase_ : int=1001 , UpperCamelCase_ : int=32 , UpperCamelCase_ : int=50 , UpperCamelCase_ : str="absolute" , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Optional[int]=None , **UpperCamelCase_ : Any , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
_lowercase : List[Any] = vocab_size
_lowercase : Union[str, Any] = hidden_size
_lowercase : Dict = num_hidden_layers
_lowercase : Optional[Any] = num_attention_heads
_lowercase : Dict = hidden_act
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[int] = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : List[Any] = type_vocab_size
_lowercase : Union[str, Any] = initializer_range
_lowercase : Optional[int] = layer_norm_eps
_lowercase : Optional[Any] = position_embedding_type
_lowercase : str = use_cache
_lowercase : str = classifier_dropout
# additional properties
_lowercase : int = max_depth
_lowercase : Dict = max_xpath_tag_unit_embeddings
_lowercase : str = max_xpath_subs_unit_embeddings
_lowercase : List[str] = tag_pad_id
_lowercase : Optional[int] = subs_pad_id
_lowercase : Any = xpath_unit_hidden_size
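# Minimal usage sketch of the config above (MarkupLMConfig upstream); the defaults
# mirror `microsoft/markuplm-base`, and the markup-specific fields are plain
# keyword overrides (the value below is illustrative only):
#
#   config = MarkupLMConfig(max_depth=64)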
| 4 | 0 |
def __UpperCamelCase ( _lowercase ) -> int:
    if _lowercase < 0:
        raise ValueError('Input value must be a positive integer' )
    elif isinstance(_lowercase, float ):
        raise TypeError('Input value must be a \'int\' type' )
    return bin(_lowercase ).count('1' )
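# Worked example: bin(25) == '0b11001', so the function above returns 3.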
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def get_config ( checkpoint_url ) -> SwinaSRConfig:
    config = SwinaSRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = 'pixelshuffle_aux'
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = 'pixelshuffledirect'
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = 'nearest+conv'
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 2_5_5.0
        config.resi_connection = ''
    return config
def rename_key ( name, config ) -> str:
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.patch_embeddings.layernorm' )
    if "layers" in name:
        name = name.replace('layers', 'encoder.stages' )
    if "residual_group.blocks" in name:
        name = name.replace('residual_group.blocks', 'layers' )
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn', 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense' )
    if "q_bias" in name:
        name = name.replace('q_bias', 'query.bias' )
    if "k_bias" in name:
        name = name.replace('k_bias', 'key.bias' )
    if "v_bias" in name:
        name = name.replace('v_bias', 'value.bias' )
    if "cpb_mlp" in name:
        name = name.replace('cpb_mlp', 'continuous_position_bias_mlp' )
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'patch_embed.projection' )
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "conv_first" in name:
        name = name.replace('conv_first', 'first_convolution' )
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace('conv_last', 'final_convolution' )
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace('conv_before_upsample.0', 'conv_before_upsample' )
            if "upsample.0" in name:
                name = name.replace('upsample.0', 'upsample.convolution_0' )
            if "upsample.2" in name:
                name = name.replace('upsample.2', 'upsample.convolution_1' )
            name = 'upsample.' + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace('upsample.0.weight', 'upsample.conv.weight' )
            name = name.replace('upsample.0.bias', 'upsample.conv.bias' )
        else:
            pass
    else:
        name = 'swin2sr.' + name
    return name
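# Example of the rename chain above (illustrative key from a classical-SR checkpoint):
#   'layers.0.residual_group.blocks.1.attn.proj.weight'
#     -> 'swin2sr.encoder.stages.0.layers.1.attention.output.dense.weight'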
def convert_state_dict ( orig_state_dict, config ) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('.' )
            stage_num = int(key_split[1] )
            block_num = int(key_split[4] )
            dim = config.embed_dim
            if "weight" in key:
                orig_state_dict[
                    F'''swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight'''
                ] = val[:dim, :]
                orig_state_dict[
                    F'''swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight'''
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    F'''swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight'''
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    F'''swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias'''
                ] = val[:dim]
                orig_state_dict[
                    F'''swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias'''
                ] = val[dim : dim * 2]
                orig_state_dict[
                    F'''swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias'''
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config )] = val
    return orig_state_dict
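# Note on the 'qkv' branch above: the original checkpoints store attention as one
# fused matrix of shape (3 * embed_dim, embed_dim); the [:dim], [dim : 2 * dim]
# and [-dim:] slices become the separate query/key/value tensors of the HF model.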
def convert_swinasr_checkpoint ( checkpoint_url, pytorch_dump_folder_path, push_to_hub ) -> None:
    config = get_config(checkpoint_url )
    model = SwinaSRForImageSuperResolution(config )
    model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu' )
    new_state_dict = convert_state_dict(state_dict, config )
    missing_keys , unexpected_keys = model.load_state_dict(new_state_dict, strict=False )
    if len(missing_keys ) > 0:
        raise ValueError('Missing keys when converting: {}'.format(missing_keys ) )
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f'''Unexpected key {key} in state_dict''' )
    # verify values
    url = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
    image = Image.open(requests.get(url, stream=True ).raw ).convert('RGB' )
    processor = SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    image_size = 126 if 'Jpeg' in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size) ),
            ToTensor(),
            Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6], std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
        ] )
    pixel_values = transforms(image ).unsqueeze(0 )
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1 )
    outputs = model(pixel_values )
    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512] )
        expected_slice = torch.tensor(
            [[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512] )
        expected_slice = torch.tensor(
            [[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] )
    assert (
        outputs.reconstruction.shape == expected_shape
    ), f'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'''
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1E-3 )
    print('Looks ok!' )
    url_to_name = {
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
            'swin2SR-classical-sr-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
            'swin2SR-classical-sr-x4-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
            'swin2SR-compressed-sr-x4-48'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
            'swin2SR-lightweight-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
            'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
        ),
    }
    model_name = url_to_name[checkpoint_url]
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub(f'''caidas/{model_name}''' )
        processor.push_to_hub(f'''caidas/{model_name}''' )
if __name__ == "__main__":
_A : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
_A : int =parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
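# Example invocation (script filename and output path are placeholders):
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64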
| 4 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_A : Union[str, Any] ={
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Optional[int] =[
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
_A : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 720 |
'''simple docstring'''
def __UpperCamelCase ( word, max_width ) -> list:
    words = word.split()

    def justify(line, width, max_width ) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line )
        if len(line ) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations ):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words ):
                # add the word
                aligned_words_list.append(line[i] )
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * ' ' )
            # just add the last word to the sentence
            aligned_words_list.append(line[-1] )
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list )

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word ) + len(line ) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word )
            width += len(word )
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width ) )
            # reset new line and new width
            line , width = [word], len(word )
    remaining_spaces = max_width - width - len(line )
    answer.append(' '.join(line ) + (remaining_spaces + 1) * ' ' )
    return answer
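# Worked example with max_width = 16:
#   'This is an example of text justification.' ->
#   ['This    is    an', 'example  of text', 'justification.  ']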
if __name__ == "__main__":
from doctest import testmod
testmod()
| 4 | 0 |
'''simple docstring'''
def prime_sieve_eratosthenes ( num ) -> list[int]:
    if num <= 0:
        raise ValueError('Input must be a positive integer' )
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p ):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1 ) if primes[prime]]
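# Worked example: prime_sieve_eratosthenes(10) -> [2, 3, 5, 7]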
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input('''Enter a positive integer: ''').strip())
print(prime_sieve_eratosthenes(user_num))
| 721 |
'''simple docstring'''
import os
from collections.abc import Iterator
def good_file_paths ( top_dir = "." ) -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir ):
        dir_names[:] = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename )[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename ).lstrip('./' )


def md_prefix ( i ) -> str:
    return f'''{i * "  "}*''' if i else "\n##"


def print_path ( old_path, new_path ) -> str:
    old_parts = old_path.split(os.sep )
    for i, new_part in enumerate(new_path.split(os.sep ) ):
        if (i + 1 > len(old_parts ) or old_parts[i] != new_part) and new_part:
            print(f'''{md_prefix(i )} {new_part.replace("_", " " ).title()}''' )
    return new_path


def print_directory_md ( top_dir = "." ) -> None:
    old_path = ''
    for filepath in sorted(good_file_paths(top_dir ) ):
        filepath , filename = os.path.split(filepath )
        if filepath != old_path:
            old_path = print_path(old_path, filepath )
        indent = (filepath.count(os.sep ) + 1) if filepath else 0
        url = f'''{filepath}/{filename}'''.replace(' ', '%20' )
        filename = os.path.splitext(filename.replace('_', ' ' ).title() )[0]
        print(f'''{md_prefix(indent )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md('''.''')
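# Example output for a hypothetical tree containing maths/fibonacci.py:
#
#   ## Maths
#     * [Fibonacci](maths/fibonacci.py)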
| 4 | 0 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCamelCase__ ( __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
A_ = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : Optional[Any]=0 ) -> List[str]:
'''simple docstring'''
_lowercase : int = np.random.RandomState(_lowerCamelCase )
_lowercase : Any = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def __UpperCAmelCase ( self : int ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
_lowercase : List[Any] = self.get_dummy_inputs()
_lowercase : Union[str, Any] = pipe(**_lowerCamelCase ).images
_lowercase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowercase : List[Any] = np.array([0.6_50_72, 0.5_84_92, 0.4_82_19, 0.5_55_21, 0.5_31_80, 0.5_59_39, 0.5_06_97, 0.3_98_00, 0.4_64_55] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self : Any ) -> Tuple:
'''simple docstring'''
_lowercase : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_lowercase : Tuple = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
_lowercase : int = self.get_dummy_inputs()
_lowercase : str = pipe(**_lowerCamelCase ).images
_lowercase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowercase : List[Any] = np.array([0.6_58_63, 0.5_94_25, 0.4_93_26, 0.5_63_13, 0.5_38_75, 0.5_66_27, 0.5_10_65, 0.3_97_77, 0.4_63_30] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
_lowercase : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_lowercase : Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
_lowercase : str = self.get_dummy_inputs()
_lowercase : Union[str, Any] = pipe(**_lowerCamelCase ).images
_lowercase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowercase : str = np.array([0.5_37_55, 0.6_07_86, 0.4_74_02, 0.4_94_88, 0.5_18_69, 0.4_98_19, 0.4_79_85, 0.3_89_57, 0.4_42_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self : Any ) -> Optional[int]:
'''simple docstring'''
_lowercase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_lowercase : str = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
_lowercase : str = self.get_dummy_inputs()
_lowercase : List[str] = pipe(**_lowerCamelCase ).images
_lowercase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowercase : Optional[Any] = np.array([0.5_37_55, 0.6_07_86, 0.4_74_02, 0.4_94_88, 0.5_18_69, 0.4_98_19, 0.4_79_85, 0.3_89_57, 0.4_42_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
_lowercase : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_lowercase : List[Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
_lowercase : Optional[int] = self.get_dummy_inputs()
_lowercase : List[Any] = pipe(**_lowerCamelCase ).images
_lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowercase : Dict = np.array([0.5_38_17, 0.6_08_12, 0.4_73_84, 0.4_95_30, 0.5_18_94, 0.4_98_14, 0.4_79_84, 0.3_89_58, 0.4_42_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_lowercase : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
_lowercase : Tuple = self.get_dummy_inputs()
_lowercase : Dict = pipe(**_lowerCamelCase ).images
_lowercase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowercase : Optional[Any] = np.array([0.5_38_95, 0.6_08_08, 0.4_79_33, 0.4_96_08, 0.5_18_86, 0.4_99_50, 0.4_80_53, 0.3_89_57, 0.4_42_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
'''simple docstring'''
_lowercase : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
_lowercase : Optional[Any] = self.get_dummy_inputs()
_lowercase : Tuple = 3 * [inputs['prompt']]
# forward
_lowercase : Optional[int] = pipe(**_lowerCamelCase )
_lowercase : Optional[int] = output.images[0, -3:, -3:, -1]
_lowercase : List[Any] = self.get_dummy_inputs()
_lowercase : Optional[int] = 3 * [inputs.pop('prompt' )]
_lowercase : str = pipe.tokenizer(
_lowerCamelCase , padding='max_length' , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCamelCase , return_tensors='np' , )
_lowercase : str = text_inputs['input_ids']
_lowercase : Optional[Any] = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
_lowercase : List[str] = prompt_embeds
# forward
_lowercase : Dict = pipe(**_lowerCamelCase )
_lowercase : Tuple = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
def __UpperCAmelCase ( self : Optional[int] ) -> str:
'''simple docstring'''
_lowercase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
_lowercase : Union[str, Any] = self.get_dummy_inputs()
_lowercase : Tuple = 3 * ['this is a negative prompt']
_lowercase : str = negative_prompt
_lowercase : Dict = 3 * [inputs['prompt']]
# forward
_lowercase : Dict = pipe(**_lowerCamelCase )
_lowercase : Dict = output.images[0, -3:, -3:, -1]
_lowercase : List[str] = self.get_dummy_inputs()
_lowercase : Union[str, Any] = 3 * [inputs.pop('prompt' )]
_lowercase : int = []
for p in [prompt, negative_prompt]:
_lowercase : str = pipe.tokenizer(
_lowerCamelCase , padding='max_length' , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCamelCase , return_tensors='np' , )
_lowercase : List[str] = text_inputs['input_ids']
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
_lowercase , _lowercase : str = embeds
# forward
_lowercase : str = pipe(**_lowerCamelCase )
_lowercase : Union[str, Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
'''simple docstring'''
_lowercase : int = ort.SessionOptions()
_lowercase : List[str] = False
return options
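    # In the upstream diffusers test the flag set above is
    # `options.enable_mem_pattern = False`: ONNX Runtime's memory-pattern
    # optimization is unsupported with the CUDA execution provider used here.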
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
'''simple docstring'''
_lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
_lowercase : Tuple = 'A painting of a squirrel eating a burger'
np.random.seed(0 )
_lowercase : str = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type='np' )
_lowercase : Union[str, Any] = output.images
_lowercase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowercase : int = np.array([0.04_52, 0.03_90, 0.00_87, 0.03_50, 0.06_17, 0.03_64, 0.05_44, 0.05_23, 0.07_20] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
'''simple docstring'''
_lowercase : Union[str, Any] = DDIMScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
_lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=_lowerCamelCase , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
_lowercase : Optional[int] = 'open neural network exchange'
_lowercase : Union[str, Any] = np.random.RandomState(0 )
_lowercase : Tuple = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCamelCase , output_type='np' )
_lowercase : List[str] = output.images
_lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowercase : Tuple = np.array([0.28_67, 0.19_74, 0.14_81, 0.72_94, 0.72_51, 0.66_67, 0.41_94, 0.56_42, 0.64_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __UpperCAmelCase ( self : Any ) -> int:
'''simple docstring'''
_lowercase : Union[str, Any] = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
_lowercase : Dict = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=_lowerCamelCase , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
_lowercase : Optional[int] = 'open neural network exchange'
_lowercase : List[str] = np.random.RandomState(0 )
_lowercase : Optional[int] = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCamelCase , output_type='np' )
_lowercase : str = output.images
_lowercase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowercase : Optional[Any] = np.array([0.23_06, 0.19_59, 0.15_93, 0.65_49, 0.63_94, 0.54_08, 0.50_65, 0.60_10, 0.61_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
_lowercase : str = 0
def test_callback_fn(UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : np.ndarray ) -> None:
_lowercase : Union[str, Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
_lowercase : Tuple = latents[0, -3:, -3:, -1]
_lowercase : str = np.array(
[-0.67_72, -0.38_35, -1.24_56, 0.19_05, -1.09_74, 0.69_67, -1.93_53, 0.01_78, 1.01_67] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
_lowercase : Tuple = latents[0, -3:, -3:, -1]
_lowercase : List[Any] = np.array(
[-0.33_51, 0.22_41, -0.18_37, -0.23_25, -0.65_77, 0.33_93, -0.02_41, 0.58_99, 1.38_75] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
_lowercase : Dict = False
_lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
_lowercase : Any = 'Andromeda galaxy in a bottle'
_lowercase : Dict = np.random.RandomState(0 )
pipe(
prompt=_lowerCamelCase , num_inference_steps=5 , guidance_scale=7.5 , generator=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def __UpperCAmelCase ( self : str ) -> str:
'''simple docstring'''
_lowercase : Dict = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(_lowerCamelCase , _lowerCamelCase )
assert pipe.safety_checker is None
_lowercase : Any = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_lowerCamelCase )
_lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(_lowerCamelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_lowercase : Optional[Any] = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
| 700 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_A : Union[str, Any] ={'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Dict =['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Union[str, Any] =['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str =[
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
_A : Union[str, Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCamelCase__ ( lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
A_ = KandinskyVaaInpaintPipeline
A_ = ["""image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
A_ = [
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
A_ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
A_ = False
@property
def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
return 32
@property
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
return 32
@property
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
'''simple docstring'''
return self.time_input_dim
@property
def __UpperCAmelCase ( self : Any ) -> Any:
'''simple docstring'''
return self.time_input_dim * 4
@property
def __UpperCAmelCase ( self : Tuple ) -> str:
'''simple docstring'''
return 100
@property
def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
_lowercase : int = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_lowercase : List[Any] = UNetaDConditionModel(**__lowerCamelCase )
return model
@property
def __UpperCAmelCase ( self : List[str] ) -> Dict:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCAmelCase ( self : Dict ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
_lowercase : Any = VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCAmelCase ( self : Dict ) -> str:
'''simple docstring'''
_lowercase : Tuple = self.dummy_unet
_lowercase : List[str] = self.dummy_movq
_lowercase : List[str] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , steps_offset=1 , prediction_type='epsilon' , thresholding=__lowerCamelCase , )
_lowercase : Optional[Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __UpperCAmelCase ( self : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str]=0 ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
_lowercase : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__lowerCamelCase )
# create init_image
_lowercase : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
_lowercase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowercase : Optional[int] = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert('RGB' ).resize((256, 256) )
# create mask
_lowercase : int = np.ones((64, 64) , dtype=np.floataa )
_lowercase : List[Any] = 0
if str(__lowerCamelCase ).startswith('mps' ):
_lowercase : Any = torch.manual_seed(__lowerCamelCase )
else:
_lowercase : Optional[int] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
_lowercase : Optional[int] = {
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def __UpperCAmelCase ( self : int ) -> Dict:
'''simple docstring'''
_lowercase : Tuple = '''cpu'''
_lowercase : Union[str, Any] = self.get_dummy_components()
_lowercase : Optional[Any] = self.pipeline_class(**__lowerCamelCase )
_lowercase : Tuple = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
_lowercase : Optional[int] = pipe(**self.get_dummy_inputs(__lowerCamelCase ) )
_lowercase : Optional[Any] = output.images
_lowercase : Dict = pipe(
**self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0]
_lowercase : List[str] = image[0, -3:, -3:, -1]
_lowercase : List[str] = image_from_tuple[0, -3:, -3:, -1]
print(F'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
_lowercase : Dict = np.array(
[0.50_77_59_03, 0.49_52_71_95, 0.48_82_45_43, 0.50_19_22_37, 0.48_64_49_06, 0.49_37_38_14, 0.4_78_05_98, 0.47_23_48_27, 0.48_32_78_48] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def __UpperCAmelCase ( self : int ) -> Optional[int]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : List[str] ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : int ) -> Tuple:
'''simple docstring'''
_lowercase : Optional[int] = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' )
_lowercase : List[str] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
_lowercase : Optional[int] = np.ones((768, 768) , dtype=np.floataa )
_lowercase : List[Any] = 0
_lowercase : Any = '''a hat'''
_lowercase : Dict = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(__lowerCamelCase )
_lowercase : str = KandinskyVaaInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa )
_lowercase : Union[str, Any] = pipeline.to(__lowerCamelCase )
pipeline.set_progress_bar_config(disable=__lowerCamelCase )
_lowercase : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
_lowercase : Optional[Any] = pipe_prior(
__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
_lowercase : Union[str, Any] = pipeline(
image=__lowerCamelCase , mask_image=__lowerCamelCase , image_embeds=__lowerCamelCase , negative_image_embeds=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )
_lowercase : Optional[int] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
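# The slow test above mirrors the two-stage Kandinsky 2.2 flow: the prior
# pipeline maps the prompt to image embeddings, then the decoder pipeline
# inpaints the masked region of the 768x768 cat image conditioned on them.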
| 701 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str]=13 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Any=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : int=99 , UpperCamelCase_ : Tuple=32 , UpperCamelCase_ : List[str]=5 , UpperCamelCase_ : Dict=4 , UpperCamelCase_ : Tuple=37 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : Union[str, Any]=4 , ) -> Tuple:
'''simple docstring'''
_lowercase : int = parent
_lowercase : str = batch_size
_lowercase : List[str] = seq_length
_lowercase : Dict = is_training
_lowercase : Optional[int] = use_attention_mask
_lowercase : List[Any] = use_token_type_ids
_lowercase : Union[str, Any] = use_labels
_lowercase : Dict = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Any = num_hidden_layers
_lowercase : int = num_attention_heads
_lowercase : Optional[int] = intermediate_size
_lowercase : Any = hidden_act
_lowercase : List[str] = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : Optional[int] = max_position_embeddings
_lowercase : int = type_vocab_size
_lowercase : Any = type_sequence_label_size
_lowercase : Any = initializer_range
_lowercase : str = num_choices
def __UpperCAmelCase ( self : str ) -> int:
'''simple docstring'''
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : int = None
if self.use_attention_mask:
_lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : Any = None
if self.use_token_type_ids:
_lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : str = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
_lowercase : Dict = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = config_and_inputs
_lowercase : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class lowerCamelCase__ ( A , unittest.TestCase ):
'''simple docstring'''
A_ = True
A_ = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __UpperCAmelCase ( self : str ) -> int:
'''simple docstring'''
_lowercase : Tuple = FlaxRoFormerModelTester(self )
@slow
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_lowercase : Optional[int] = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=UpperCamelCase_ )
_lowercase : str = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase_ )
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
'''simple docstring'''
_lowercase : Dict = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
_lowercase : Any = jnp.array([[0, 1, 2, 3, 4, 5]] )
_lowercase : int = model(UpperCamelCase_ )[0]
_lowercase : Union[str, Any] = 5_0000
_lowercase : str = (1, 6, vocab_size)
self.assertEqual(output.shape , UpperCamelCase_ )
_lowercase : int = jnp.array(
[[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
| 4 | 0 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : str=sys.maxsize ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Optional[Any] = 'bilinear'
_lowercase : str = max_size
_lowercase : List[str] = short_edge_length
def __call__( self : Optional[Any] , UpperCamelCase_ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Dict = []
for img in imgs:
_lowercase , _lowercase : Optional[Any] = img.shape[:2]
# later: provide list and randomly choose index for resize
_lowercase : Tuple = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
_lowercase : List[Any] = size * 1.0 / min(_lowercase , _lowercase )
if h < w:
_lowercase , _lowercase : str = size, scale * w
else:
_lowercase , _lowercase : Optional[Any] = scale * h, size
if max(_lowercase , _lowercase ) > self.max_size:
_lowercase : Any = self.max_size * 1.0 / max(_lowercase , _lowercase )
_lowercase : str = newh * scale
_lowercase : Dict = neww * scale
_lowercase : Union[str, Any] = int(neww + 0.5 )
_lowercase : str = int(newh + 0.5 )
if img.dtype == np.uinta:
_lowercase : Tuple = Image.fromarray(_lowercase )
_lowercase : Tuple = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
_lowercase : Optional[Any] = np.asarray(_lowercase )
else:
_lowercase : Optional[int] = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
_lowercase : List[str] = nn.functional.interpolate(
_lowercase , (newh, neww) , mode=self.interp_method , align_corners=_lowercase ).squeeze(0 )
img_augs.append(_lowercase )
return img_augs
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase_ : int ) -> str:
'''simple docstring'''
_lowercase : str = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
_lowercase : Optional[int] = cfg.INPUT.FORMAT
_lowercase : Union[str, Any] = cfg.SIZE_DIVISIBILITY
_lowercase : int = cfg.PAD_VALUE
_lowercase : Tuple = cfg.INPUT.MAX_SIZE_TEST
_lowercase : Union[str, Any] = cfg.MODEL.DEVICE
_lowercase : str = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_lowercase : str = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_lowercase : Tuple = lambda UpperCamelCase_ : (x - self.pixel_mean) / self.pixel_std
def __UpperCAmelCase ( self : Any , UpperCamelCase_ : str ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Any = tuple(max(_lowercase ) for s in zip(*[img.shape for img in images] ) )
_lowercase : int = [im.shape[-2:] for im in images]
_lowercase : int = [
nn.functional.pad(
_lowercase , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(_lowercase , _lowercase )
]
return torch.stack(_lowercase ), torch.tensor(_lowercase )
def __call__( self : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict=False ) -> str:
'''simple docstring'''
with torch.no_grad():
if not isinstance(_lowercase , _lowercase ):
_lowercase : Optional[Any] = [images]
if single_image:
assert len(_lowercase ) == 1
for i in range(len(_lowercase ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(_lowercase , images.pop(_lowercase ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
_lowercase , torch.as_tensor(img_tensorize(images.pop(_lowercase ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
_lowercase : List[Any] = torch.tensor([im.shape[:2] for im in images] )
_lowercase : Optional[Any] = self.aug(_lowercase )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
_lowercase : Tuple = [self.normalizer(_lowercase ) for x in images]
# now pad them to do the following operations
_lowercase , _lowercase : Any = self.pad(_lowercase )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
_lowercase : List[str] = torch.true_divide(_lowercase , _lowercase )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def __UpperCamelCase ( _lowercase, _lowercase ) -> Dict:
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def __UpperCamelCase ( _lowercase, _lowercase ) -> Dict:
assert torch.isfinite(UpperCamelCase__ ).all(), "Box tensor contains infinite or NaN!"
_lowercase , _lowercase : str = box_size
tensor[:, 0].clamp_(min=0, max=UpperCamelCase__ )
tensor[:, 1].clamp_(min=0, max=UpperCamelCase__ )
tensor[:, 2].clamp_(min=0, max=UpperCamelCase__ )
tensor[:, 3].clamp_(min=0, max=UpperCamelCase__ )
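# Added note: the two module-level helpers above are detectron2-style box
# utilities -- the first rescales (x0, y0, x1, y1) boxes by per-image (y, x)
# scale factors, the second clamps box coordinates in place into a
# (height, width) canvas.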
| 702 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_A : Optional[int] =logging.get_logger(__name__)
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = ["""input_features""", """is_longer"""]
def __init__( self : List[Any] , UpperCamelCase_ : List[Any]=64 , UpperCamelCase_ : int=4_8000 , UpperCamelCase_ : Union[str, Any]=480 , UpperCamelCase_ : Any=10 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[int]=0.0 , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 1_4000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ) -> Dict:
'''simple docstring'''
super().__init__(
feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , )
_lowercase : Tuple = top_db
_lowercase : Any = truncation
_lowercase : str = padding
_lowercase : int = fft_window_size
_lowercase : Any = (fft_window_size >> 1) + 1
_lowercase : int = hop_length
_lowercase : Any = max_length_s
_lowercase : str = max_length_s * sampling_rate
_lowercase : Any = sampling_rate
_lowercase : List[Any] = frequency_min
_lowercase : Tuple = frequency_max
_lowercase : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale='htk' , )
_lowercase : Any = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm='slaney' , mel_scale='slaney' , )
def __UpperCAmelCase ( self : Tuple ) -> Dict[str, Any]:
'''simple docstring'''
_lowercase : Tuple = copy.deepcopy(self.__dict__ )
_lowercase : int = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None ) -> np.ndarray:
'''simple docstring'''
_lowercase : List[str] = spectrogram(
UpperCamelCase_ , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel='dB' , )
return log_mel_spectrogram.T
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_lowercase : Tuple = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
_lowercase : int = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
_lowercase : Union[str, Any] = [0]
# randomly choose index for each part
_lowercase : Tuple = np.random.choice(ranges[0] )
_lowercase : int = np.random.choice(ranges[1] )
_lowercase : Any = np.random.choice(ranges[2] )
_lowercase : int = mel[idx_front : idx_front + chunk_frames, :]
_lowercase : int = mel[idx_middle : idx_middle + chunk_frames, :]
_lowercase : Tuple = mel[idx_back : idx_back + chunk_frames, :]
_lowercase : List[Any] = torch.tensor(mel[None, None, :] )
_lowercase : Optional[int] = torch.nn.functional.interpolate(
UpperCamelCase_ , size=[chunk_frames, 64] , mode='bilinear' , align_corners=UpperCamelCase_ )
_lowercase : str = mel_shrink[0][0].numpy()
_lowercase : int = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : np.array , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_lowercase : Tuple = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_lowercase : Any = len(UpperCamelCase_ ) - max_length
_lowercase : Dict = np.random.randint(0 , overflow + 1 )
_lowercase : Optional[int] = waveform[idx : idx + max_length]
_lowercase : Dict = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
_lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters )
_lowercase : List[Any] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
_lowercase : Optional[int] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_lowercase : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
_lowercase : List[Any] = False
else:
_lowercase : Union[str, Any] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
_lowercase : int = True
else:
raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
else:
_lowercase : Any = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
_lowercase : List[Any] = int(max_length / len(UpperCamelCase_ ) )
_lowercase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
_lowercase : Union[str, Any] = int(max_length / len(UpperCamelCase_ ) )
_lowercase : Union[str, Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_ ) )
_lowercase : Dict = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
_lowercase : str = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters )
_lowercase : Dict = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
_lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Dict , ) -> BatchFeature:
'''simple docstring'''
_lowercase : Dict = truncation if truncation is not None else self.truncation
_lowercase : int = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
_lowercase : Optional[Any] = isinstance(UpperCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_lowercase : List[str] = is_batched_numpy or (
isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_lowercase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray ):
_lowercase : Any = np.asarray(UpperCamelCase_ , dtype=np.floataa )
elif isinstance(UpperCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowercase : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowercase : int = [np.asarray(UpperCamelCase_ )]
# convert to mel spectrogram, truncate and pad if needed.
_lowercase : Optional[Any] = [
self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_ )
for waveform in raw_speech
]
_lowercase : List[Any] = []
_lowercase : Dict = []
for mel, longer in padded_inputs:
input_mel.append(UpperCamelCase_ )
is_longer.append(UpperCamelCase_ )
if truncation == "fusion" and sum(UpperCamelCase_ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_lowercase : Optional[Any] = np.random.randint(0 , len(UpperCamelCase_ ) )
_lowercase : str = True
if isinstance(input_mel[0] , UpperCamelCase_ ):
_lowercase : str = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
_lowercase : Tuple = [[longer] for longer in is_longer]
_lowercase : Optional[Any] = {'input_features': input_mel, 'is_longer': is_longer}
_lowercase : Optional[int] = BatchFeature(UpperCamelCase_ )
if return_tensors is not None:
_lowercase : List[Any] = input_features.convert_to_tensors(UpperCamelCase_ )
return input_features
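# Added usage sketch (hedged -- this mirrors transformers' ClapFeatureExtractor;
# the class name above is mangled, so this stays illustrative):
#     fe = ClapFeatureExtractor()
#     out = fe(np.zeros(48_000, dtype=np.float32), sampling_rate=48_000,
#              return_tensors="np")
#     out["input_features"]  # (num_audio, 4, frames, 64) under "fusion" truncation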
| 4 | 0 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
_A : Union[str, Any] =pd.read_csv('''sample_data.csv''', header=None)
_A : Tuple =df.shape[:1][0]
# If you're using some other dataset input the target column
_A : Optional[int] =df.iloc[:, 1:2]
_A : List[Any] =actual_data.values.reshape(len_data, 1)
_A : str =MinMaxScaler().fit_transform(actual_data)
_A : List[Any] =1_0
_A : Union[str, Any] =5
_A : List[str] =2_0
_A : Dict =len_data - periods * look_back
_A : List[str] =actual_data[:division]
_A : Tuple =actual_data[division - look_back :]
_A , _A : Dict =[], []
_A , _A : Optional[int] =[], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
_A : List[str] =np.array(train_x)
_A : Optional[Any] =np.array(test_x)
_A : int =np.array([list(i.ravel()) for i in train_y])
_A : List[str] =np.array([list(i.ravel()) for i in test_y])
_A : Union[str, Any] =Sequential()
model.add(LSTM(1_2_8, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(6_4, input_shape=(1_2_8, 1)))
model.add(Dense(forward_days))
model.compile(loss='''mean_squared_error''', optimizer='''adam''')
_A : List[Any] =model.fit(
x_train, y_train, epochs=1_5_0, verbose=1, shuffle=True, batch_size=4
)
_A : Optional[Any] =model.predict(x_test)
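# Added follow-up sketch: the original script goes on to plot the forecasts;
# with the prediction bound to a readable name, a minimal numeric check is
#     result = model.predict(x_test)
#     rmse = np.sqrt(np.mean((result.ravel() - y_test.ravel()) ** 2))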
| 703 |
'''simple docstring'''
from __future__ import annotations
import requests
def __UpperCamelCase ( _lowercase ) -> dict:
_lowercase : Optional[int] = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
return requests.get(_lowercase ).json()
def __UpperCamelCase ( _lowercase = 10 ) -> list[dict]:
_lowercase : Union[str, Any] = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
_lowercase : Optional[Any] = requests.get(_lowercase ).json()[:max_stories]
return [get_hackernews_story(_lowercase ) for story_id in story_ids]
def __UpperCamelCase ( _lowercase = 10 ) -> str:
_lowercase : Tuple = hackernews_top_stories(_lowercase )
return "\n".join('* [{title}]({url})'.format(**_lowercase ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 4 | 0 |
'''simple docstring'''
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
_A : str ={
"n_samples": 6_4,
"horizon": 3_2,
"num_inference_steps": 2_0,
"n_guide_steps": 2, # can set to 0 for faster sampling, does not use value network
"scale_grad_by_std": True,
"scale": 0.1,
"eta": 0.0,
"t_grad_cutoff": 2,
"device": "cpu",
}
if __name__ == "__main__":
_A : Union[str, Any] ="hopper-medium-v2"
_A : str =gym.make(env_name)
_A : Any =ValueGuidedRLPipeline.from_pretrained(
'''bglick13/hopper-medium-v2-value-function-hor32''',
env=env,
)
env.seed(0)
_A : List[Any] =env.reset()
_A : Any =0
_A : Dict =0
_A : Optional[int] =1_0_0_0
_A : int =[obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
_A : Optional[int] =pipeline(obs, planning_horizon=3_2)
# execute action in environment
_A : List[Any] =env.step(denorm_actions)
_A : Optional[Any] =env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
F''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
_A : Dict =next_observation
except KeyboardInterrupt:
pass
print(F'''Total reward: {total_reward}''')
| 704 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Dict =logging.get_logger(__name__)
_A : Dict ={
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """megatron-bert"""
def __init__( self : int , UpperCamelCase_ : int=2_9056 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[Any]=24 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=4096 , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : int=512 , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : Any=1E-12 , UpperCamelCase_ : Tuple=0 , UpperCamelCase_ : Optional[int]="absolute" , UpperCamelCase_ : Optional[Any]=True , **UpperCamelCase_ : Any , ) -> List[Any]:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Dict = vocab_size
_lowercase : Any = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Dict = num_attention_heads
_lowercase : Dict = hidden_act
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[int] = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : str = type_vocab_size
_lowercase : Optional[Any] = initializer_range
_lowercase : List[str] = layer_norm_eps
_lowercase : List[Any] = position_embedding_type
_lowercase : Optional[Any] = use_cache
| 4 | 0 |
'''simple docstring'''
def __UpperCamelCase ( _lowercase, _lowercase ) -> int:
return 1 if input_a == input_a else 0
def __UpperCamelCase ( ) -> None:
assert xnor_gate(0, 0 ) == 1
assert xnor_gate(0, 1 ) == 0
assert xnor_gate(1, 0 ) == 0
assert xnor_gate(1, 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 705 |
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def __UpperCamelCase ( _lowercase ) -> List[Any]:
_lowercase : Tuple = args.pruning_method
_lowercase : int = args.threshold
_lowercase : str = args.model_name_or_path.rstrip('/' )
_lowercase : Dict = args.target_model_path
print(f'''Load fine-pruned model from {model_name_or_path}''' )
_lowercase : str = torch.load(os.path.join(_lowercase, 'pytorch_model.bin' ) )
_lowercase : List[Any] = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
_lowercase : Optional[int] = tensor
print(f'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
_lowercase : List[str] = tensor
print(f'''Copied layer {name}''' )
elif "bias" in name:
_lowercase : Dict = tensor
print(f'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
_lowercase : Union[str, Any] = MagnitudeBinarizer.apply(inputs=_lowercase, threshold=_lowercase )
_lowercase : Optional[Any] = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
_lowercase : Optional[Any] = name[:-6]
_lowercase : Optional[Any] = model[f'''{prefix_}mask_scores''']
_lowercase : List[str] = TopKBinarizer.apply(_lowercase, _lowercase )
_lowercase : str = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
_lowercase : str = name[:-6]
_lowercase : Optional[Any] = model[f'''{prefix_}mask_scores''']
_lowercase : str = ThresholdBinarizer.apply(_lowercase, _lowercase, _lowercase )
_lowercase : Optional[int] = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
_lowercase : Optional[int] = name[:-6]
_lowercase : List[str] = model[f'''{prefix_}mask_scores''']
_lowercase , _lowercase : Union[str, Any] = -0.1, 1.1
_lowercase : str = torch.sigmoid(_lowercase )
_lowercase : int = s * (r - l) + l
_lowercase : Optional[Any] = s_bar.clamp(min=0.0, max=1.0 )
_lowercase : Union[str, Any] = tensor * mask
print(f'''Pruned layer {name}''' )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
_lowercase : List[Any] = os.path.join(
os.path.dirname(_lowercase ), f'''bertarized_{os.path.basename(_lowercase )}''' )
if not os.path.isdir(_lowercase ):
shutil.copytree(_lowercase, _lowercase )
print(f'''\nCreated folder {target_model_path}''' )
torch.save(_lowercase, os.path.join(_lowercase, 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
_A : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
_A : List[Any] =parser.parse_args()
main(args)
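# Added example invocation (the script name is an assumption borrowed from the
# movement-pruning research example this file resembles; flags match the
# argparse definition above):
#     python bertarize.py --pruning_method topK --threshold 0.10 \
#         --model_name_or_path ./serialization_dir/fine_pruned_model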
| 4 | 0 |
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase_ : str = "▁" , UpperCamelCase_ : bool = True , UpperCamelCase_ : Union[str, AddedToken] = "<unk>" , UpperCamelCase_ : Union[str, AddedToken] = "</s>" , UpperCamelCase_ : Union[str, AddedToken] = "<pad>" , ) -> Any:
'''simple docstring'''
_lowercase : Tuple = {
'pad': {'id': 0, 'token': pad_token},
'eos': {'id': 1, 'token': eos_token},
'unk': {'id': 2, 'token': unk_token},
}
_lowercase : Tuple = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
_lowercase : str = token_dict['token']
_lowercase : Optional[Any] = Tokenizer(Unigram() )
_lowercase : Dict = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(' {2,}' ) , ' ' ),
normalizers.Lowercase(),
] )
_lowercase : str = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=__UpperCamelCase , add_prefix_space=__UpperCamelCase ),
pre_tokenizers.Digits(individual_digits=__UpperCamelCase ),
pre_tokenizers.Punctuation(),
] )
_lowercase : int = decoders.Metaspace(replacement=__UpperCamelCase , add_prefix_space=__UpperCamelCase )
_lowercase : int = TemplateProcessing(
single=F'''$A {self.special_tokens["eos"]["token"]}''' , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
_lowercase : Tuple = {
'model': 'SentencePieceUnigram',
'replacement': replacement,
'add_prefix_space': add_prefix_space,
}
super().__init__(__UpperCamelCase , __UpperCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : Union[str, List[str]] , UpperCamelCase_ : int = 8000 , UpperCamelCase_ : bool = True , ) -> Any:
'''simple docstring'''
_lowercase : Optional[Any] = trainers.UnigramTrainer(
vocab_size=__UpperCamelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCamelCase , )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
_lowercase : str = [files]
self._tokenizer.train(__UpperCamelCase , trainer=__UpperCamelCase )
self.add_unk_id()
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Union[Iterator[str], Iterator[Iterator[str]]] , UpperCamelCase_ : int = 8000 , UpperCamelCase_ : bool = True , ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Tuple = trainers.UnigramTrainer(
vocab_size=__UpperCamelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCamelCase , )
self._tokenizer.train_from_iterator(__UpperCamelCase , trainer=__UpperCamelCase )
self.add_unk_id()
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Dict = json.loads(self._tokenizer.to_str() )
_lowercase : int = self.special_tokens['unk']['id']
_lowercase : Optional[int] = Tokenizer.from_str(json.dumps(__UpperCamelCase ) )
| 706 |
'''simple docstring'''
_A : Optional[Any] ='''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def __UpperCamelCase ( _lowercase ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(_lowercase, _lowercase ):
_lowercase : Union[str, Any] = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(_lowercase )
_lowercase : int = ''.join(bin(_lowercase )[2:].zfill(8 ) for byte in data )
_lowercase : Dict = len(_lowercase ) % 6 != 0
if padding_needed:
# The padding that will be added later
_lowercase : Optional[Any] = B'=' * ((6 - len(_lowercase ) % 6) // 2)
        # Pad binary_stream with arbitrary binary digits (0s by default) to make
        # its length a multiple of 6.
binary_stream += "0" * (6 - len(_lowercase ) % 6)
else:
_lowercase : Optional[int] = B''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6], 2 )]
for index in range(0, len(_lowercase ), 6 ) ).encode()
+ padding
)
def __UpperCamelCase ( _lowercase ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(_lowercase, _lowercase ) and not isinstance(_lowercase, _lowercase ):
_lowercase : int = (
'argument should be a bytes-like object or ASCII string, '
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(_lowercase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_lowercase, _lowercase ):
try:
_lowercase : Optional[int] = encoded_data.decode('utf-8' )
except UnicodeDecodeError:
raise ValueError('base64 encoded data should only contain ASCII characters' )
_lowercase : Optional[int] = encoded_data.count('=' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_lowercase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
_lowercase : str = encoded_data[:-padding]
_lowercase : Tuple = ''.join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
_lowercase : Union[str, Any] = ''.join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )
_lowercase : List[str] = [
int(binary_stream[index : index + 8], 2 )
for index in range(0, len(_lowercase ), 8 )
]
return bytes(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
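    # Added round-trip check against the standard library (the local helpers'
    # names are mangled above, so the stdlib codec serves as the reference):
    import base64
    assert base64.b64encode(b"Python") == b"UHl0aG9u"
    assert base64.b64decode(b"UHl0aG9u") == b"Python"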
| 4 | 0 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
_A : Optional[Any] =re.compile(r'''\s+''')
def __UpperCamelCase ( _lowercase ) -> List[str]:
return {"hash": hashlib.mda(re.sub(_lowercase, '', example['content'] ).encode('utf-8' ) ).hexdigest()}
def __UpperCamelCase ( _lowercase ) -> Dict:
_lowercase : Tuple = [len(_lowercase ) for line in example["""content"""].splitlines()]
return {"line_mean": np.mean(_lowercase ), "line_max": max(_lowercase )}
def __UpperCamelCase ( _lowercase ) -> str:
_lowercase : int = np.mean([c.isalnum() for c in example['content']] )
return {"alpha_frac": alpha_frac}
def __UpperCamelCase ( _lowercase, _lowercase ) -> Optional[Any]:
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def __UpperCamelCase ( _lowercase, _lowercase=5 ) -> Tuple:
_lowercase : Tuple = ["""auto-generated""", """autogenerated""", """automatically generated"""]
_lowercase : Any = example["""content"""].splitlines()
for _, line in zip(range(_lowercase ), _lowercase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def __UpperCamelCase ( _lowercase, _lowercase=5, _lowercase=0.0_5 ) -> List[str]:
_lowercase : Optional[int] = ["""unit tests""", """test file""", """configuration file"""]
_lowercase : Tuple = example["""content"""].splitlines()
_lowercase : List[str] = 0
_lowercase : Dict = 0
# first test
for _, line in zip(range(_lowercase ), _lowercase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_lowercase : Optional[Any] = example["""content"""].count('\n' )
_lowercase : Any = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def __UpperCamelCase ( _lowercase ) -> Optional[Any]:
_lowercase : Any = ["""def """, """class """, """for """, """while """]
_lowercase : Any = example["""content"""].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def __UpperCamelCase ( _lowercase, _lowercase=4 ) -> Tuple:
_lowercase : int = example["""content"""].splitlines()
_lowercase : Any = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def __UpperCamelCase ( _lowercase ) -> List[Any]:
_lowercase : List[str] = tokenizer(example['content'], truncation=_lowercase )["""input_ids"""]
_lowercase : str = len(example['content'] ) / len(_lowercase )
return {"ratio": ratio}
def __UpperCamelCase ( _lowercase ) -> List[str]:
_lowercase : Tuple = {}
results.update(get_hash(_lowercase ) )
results.update(line_stats(_lowercase ) )
results.update(alpha_stats(_lowercase ) )
results.update(char_token_ratio(_lowercase ) )
results.update(is_autogenerated(_lowercase ) )
results.update(is_config_or_test(_lowercase ) )
results.update(has_no_keywords(_lowercase ) )
results.update(has_few_assignments(_lowercase ) )
return results
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> int:
if not check_uniques(_lowercase, _lowercase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def __UpperCamelCase ( _lowercase ) -> Union[str, Any]:
with open(_lowercase, 'rb' ) as f_in:
with gzip.open(str(_lowercase ) + '.gz', 'wb', compresslevel=6 ) as f_out:
shutil.copyfileobj(_lowercase, _lowercase )
os.unlink(_lowercase )
# Settings
_A : Any =HfArgumentParser(PreprocessingArguments)
_A : Dict =parser.parse_args()
if args.num_workers is None:
_A : Any =multiprocessing.cpu_count()
_A : Any =AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
_A : List[Any] =time.time()
_A : Optional[int] =load_dataset(args.dataset_name, split='''train''')
print(F'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
_A : List[Any] =time.time()
_A : Tuple =ds.map(preprocess, num_proc=args.num_workers)
print(F'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
_A : List[Any] =set(ds.unique('''hash'''))
_A : List[str] =len(uniques) / len(ds)
print(F'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
_A : Dict =time.time()
_A : List[str] =ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(F'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
_A : List[Any] =time.time()
_A , _A : Optional[Any] =deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(F'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
_A : Tuple =Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
_A : Tuple =output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
_A : Optional[int] =time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
_A : Union[str, Any] =str(data_dir / F'''file-{file_number+1:012}.json''')
_A : Dict =min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F'''Time to save dataset: {time.time()-t_start:.2f}''')
| 707 |
'''simple docstring'''
def __UpperCamelCase ( _lowercase ) -> bool:
return str(_lowercase ) == str(_lowercase )[::-1]
def __UpperCamelCase ( _lowercase ) -> int:
return int(_lowercase ) + int(str(_lowercase )[::-1] )
def __UpperCamelCase ( _lowercase = 1_0000 ) -> int:
_lowercase : List[str] = []
for num in range(1, _lowercase ):
_lowercase : Tuple = 0
_lowercase : Tuple = num
while iterations < 50:
_lowercase : Union[str, Any] = sum_reverse(_lowercase )
iterations += 1
if is_palindrome(_lowercase ):
break
else:
lychrel_nums.append(_lowercase )
return len(_lowercase )
if __name__ == "__main__":
print(F'''{solution() = }''')
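    # Added illustration of one reverse-and-add step: 47 + 74 = 121 is already
    # a palindrome, while 196 (the classic suspected Lychrel number) never
    # settles within the 50-iteration budget used above.
    _n = 47 + int(str(47)[::-1])
    assert _n == 121 and str(_n) == str(_n)[::-1]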
| 4 | 0 |
'''simple docstring'''
import math
def __UpperCamelCase ( _lowercase ) -> Optional[Any]:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
    _lowercase : Union[str, Any] = range(3, int(math.sqrt(number ) + 1 ), 2 )
return not any(not number % i for i in odd_numbers )
def __UpperCamelCase ( _lowercase, _lowercase=1, **_lowercase ) -> Tuple:
_lowercase : Union[str, Any] = factor * value
_lowercase : List[Any] = value
    while not is_prime(value ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
        return next_prime(value + 1, **kwargs )
return value
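# Added sanity check of the upward scan (a hypothetical standalone
# reconstruction, since the original identifiers are mangled above):
def _next_prime_reference(value: int) -> int:
    while True:
        value += 1
        if value > 1 and all(value % i for i in range(2, int(value**0.5) + 1)):
            return value
assert _next_prime_reference(14) == 17
assert _next_prime_reference(13) == 17  # an already-prime input advances to the next prime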
| 708 |
'''simple docstring'''
import argparse
from collections import defaultdict
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) -> int:
_lowercase : Optional[int] = f'''{file}_{class_name}_{test_name}'''
done_test[_id] += 1
with open(_lowercase, 'r' ) as f:
_lowercase : Optional[int] = f.readlines()
_lowercase : Dict = f'''class {class_name}('''
_lowercase : List[Any] = f'''{4 * " "}def {test_name}('''
_lowercase : List[str] = f'''{8 * " "}{correct_line.split()[0]}'''
_lowercase : List[str] = f'''{16 * " "}{correct_line.split()[0]}'''
_lowercase : Dict = False
_lowercase : str = False
_lowercase : List[Any] = False
_lowercase : Union[str, Any] = False
_lowercase : Any = 0
_lowercase : Tuple = 0
_lowercase : Optional[int] = []
for line in lines:
if line.startswith(_lowercase ):
_lowercase : int = True
elif in_class and line.startswith(_lowercase ):
_lowercase : List[Any] = True
elif in_class and in_func and (line.startswith(_lowercase ) or line.startswith(_lowercase )):
_lowercase : str = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_lowercase : List[Any] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_lowercase : Any = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f'''{spaces * " "}{correct_line}''' )
_lowercase : Any = False
else:
new_lines.append(_lowercase )
with open(_lowercase, 'w' ) as f:
for line in new_lines:
f.write(_lowercase )
def __UpperCamelCase ( _lowercase, _lowercase=None ) -> Optional[Any]:
if fail is not None:
with open(_lowercase, 'r' ) as f:
_lowercase : Any = {l.strip() for l in f.readlines()}
else:
_lowercase : str = None
with open(_lowercase, 'r' ) as f:
_lowercase : str = f.readlines()
_lowercase : Union[str, Any] = defaultdict(_lowercase )
for line in correct_lines:
_lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = line.split(';' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(_lowercase, _lowercase, _lowercase, _lowercase, _lowercase )
if __name__ == "__main__":
_A : str =argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
_A : Union[str, Any] =parser.parse_args()
main(args.correct_filename, args.fail_filename)
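# Added note on the expected input format (inferred from main() above): each
# line of --correct_filename reads `file;class_name;test_name;correct_line`,
# for example
#     tests/test_foo.py;FooTester;test_bar;self.assertEqual(out.shape, (1, 3))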
| 4 | 0 |
'''simple docstring'''
import math
import flax.linen as nn
import jax.numpy as jnp
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase = 1, _lowercase = 1, _lowercase = 1.0E4, _lowercase = False, _lowercase = 1.0, ) -> jnp.ndarray:
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, f'''Embedding dimension {embedding_dim} should be even'''
_lowercase : str = float(embedding_dim // 2 )
_lowercase : Optional[int] = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
_lowercase : Optional[int] = min_timescale * jnp.exp(jnp.arange(_lowercase, dtype=jnp.floataa ) * -log_timescale_increment )
_lowercase : Dict = jnp.expand_dims(_lowercase, 1 ) * jnp.expand_dims(_lowercase, 0 )
# scale embeddings
_lowercase : List[str] = scale * emb
if flip_sin_to_cos:
_lowercase : Dict = jnp.concatenate([jnp.cos(_lowercase ), jnp.sin(_lowercase )], axis=1 )
else:
_lowercase : Optional[int] = jnp.concatenate([jnp.sin(_lowercase ), jnp.cos(_lowercase )], axis=1 )
_lowercase : Tuple = jnp.reshape(_lowercase, [jnp.shape(_lowercase )[0], embedding_dim] )
return signal
class lowerCamelCase__ ( nn.Module ):
'''simple docstring'''
A_ = 32
A_ = jnp.floataa
@nn.compact
def __call__( self : Optional[Any] , UpperCamelCase_ : Any ) -> str:
'''simple docstring'''
_lowercase : str = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_1' )(lowerCamelCase_ )
_lowercase : List[Any] = nn.silu(lowerCamelCase_ )
_lowercase : List[Any] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_2' )(lowerCamelCase_ )
return temb
class lowerCamelCase__ ( nn.Module ):
'''simple docstring'''
A_ = 32
A_ = False
A_ = 1
@nn.compact
def __call__( self : Dict , UpperCamelCase_ : Dict ) -> Union[str, Any]:
'''simple docstring'''
return get_sinusoidal_embeddings(
lowerCamelCase_ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
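# Added usage sketch (hedged): the module-level helper above corresponds to
# diffusers' get_sinusoidal_embeddings; under its presumed name,
#     emb = get_sinusoidal_embeddings(jnp.arange(4), embedding_dim=32)
# returns a (4, 32) array laid out as [sin | cos] halves unless
# flip_sin_to_cos=True swaps them to [cos | sin].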
| 709 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_A : Optional[int] =logging.get_logger(__name__)
@add_end_docstrings(A )
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : Tuple , **UpperCamelCase_ : List[str] ) -> int:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : int , UpperCamelCase_ : Union[str, List[str], "Image", List["Image"]] , **UpperCamelCase_ : Tuple ) -> List[Any]:
'''simple docstring'''
return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] , **UpperCamelCase_ : str ) -> List[str]:
'''simple docstring'''
_lowercase : Optional[int] = {}
if "candidate_labels" in kwargs:
_lowercase : Union[str, Any] = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
_lowercase : int = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str="This is a photo of {}." ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Dict = load_image(UpperCamelCase_ )
_lowercase : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
_lowercase : Optional[Any] = candidate_labels
_lowercase : List[Any] = [hypothesis_template.format(UpperCamelCase_ ) for x in candidate_labels]
_lowercase : Union[str, Any] = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework , padding=UpperCamelCase_ )
_lowercase : Any = [text_inputs]
return inputs
def __UpperCAmelCase ( self : str , UpperCamelCase_ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = model_inputs.pop('candidate_labels' )
_lowercase : List[str] = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , UpperCamelCase_ ):
_lowercase : Optional[int] = text_inputs[0]
else:
# Batching case.
_lowercase : List[str] = text_inputs[0][0]
_lowercase : Optional[Any] = self.model(**UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Optional[Any] = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> List[str]:
'''simple docstring'''
_lowercase : Optional[int] = model_outputs.pop('candidate_labels' )
_lowercase : Optional[int] = model_outputs['logits'][0]
if self.framework == "pt":
_lowercase : List[Any] = logits.softmax(dim=-1 ).squeeze(-1 )
_lowercase : Tuple = probs.tolist()
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_lowercase : List[Any] = [scores]
elif self.framework == "tf":
_lowercase : Optional[int] = stable_softmax(UpperCamelCase_ , axis=-1 )
_lowercase : List[Any] = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
_lowercase : List[Any] = [
{'score': score, 'label': candidate_label}
for score, candidate_label in sorted(zip(UpperCamelCase_ , UpperCamelCase_ ) , key=lambda UpperCamelCase_ : -x[0] )
]
return result
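# Added usage sketch: this class backs transformers' public
# "zero-shot-image-classification" pipeline task (scores below are illustrative):
#     from transformers import pipeline
#     clf = pipeline("zero-shot-image-classification",
#                    model="openai/clip-vit-base-patch32")
#     clf("cat.png", candidate_labels=["cat", "dog"])
#     # -> [{"score": ..., "label": "cat"}, {"score": ..., "label": "dog"}]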
| 4 | 0 |
'''simple docstring'''
def __UpperCamelCase ( _lowercase ) -> int:
assert column_title.isupper()
_lowercase : int = 0
_lowercase : Optional[int] = len(_lowercase ) - 1
_lowercase : Optional[Any] = 0
while index >= 0:
        _lowercase : Any = (ord(column_title[index] ) - 64) * pow(26, power )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
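    # Added worked example of the base-26 digits the function reads right to
    # left: "AB" -> (ord("B") - 64) * 26**0 + (ord("A") - 64) * 26**1 = 2 + 26 = 28.
    _value = 0
    for _ch in "AB":
        _value = _value * 26 + (ord(_ch) - 64)
    assert _value == 28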
| 710 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __UpperCamelCase ( _lowercase ) -> None:
_lowercase , _lowercase : List[Any] = analyze_text(_lowercase )
_lowercase : Any = list(' ' + ascii_lowercase )
# what is our total sum of probabilities.
_lowercase : Union[str, Any] = sum(single_char_strings.values() )
# one length string
_lowercase : Union[str, Any] = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
_lowercase : Any = single_char_strings[ch]
_lowercase : int = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
# two len string
_lowercase : str = sum(two_char_strings.values() )
_lowercase : str = 0
# for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            _lowercase : Optional[Any] = ch0 + ch1
if sequence in two_char_strings:
_lowercase : int = two_char_strings[sequence]
_lowercase : Optional[int] = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def __UpperCamelCase ( _lowercase ) -> tuple[dict, dict]:
_lowercase : Optional[Any] = Counter() # type: ignore
_lowercase : List[Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0, len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __UpperCamelCase ( ) -> List[Any]:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
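    # Added reference computation of the first-order formula used above,
    # H1 = -sum_c p(c) * log2(p(c)): two equally likely symbols give 1 bit.
    _text = "abab"
    _probs = [c / len(_text) for c in Counter(_text).values()]
    assert abs(-sum(p * math.log2(p) for p in _probs) - 1.0) < 1e-9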
| 4 | 0 |
'''simple docstring'''
def __UpperCamelCase ( _lowercase ) -> list[int]:
if num <= 0:
raise ValueError('Input must be a positive integer' )
_lowercase : int = [True] * (num + 1)
_lowercase : int = 2
while p * p <= num:
if primes[p]:
            for i in range(p * p, num + 1, p ):
_lowercase : Dict = False
p += 1
return [prime for prime in range(2, num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
_A : Optional[int] = int(input('''Enter a positive integer: ''').strip())
print(prime_sieve_eratosthenes(user_num))
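    # Added cross-check with a compact standalone sieve:
    _limit = 30
    _flags = [True] * (_limit + 1)
    for _p in range(2, int(_limit**0.5) + 1):
        if _flags[_p]:
            for _m in range(_p * _p, _limit + 1, _p):
                _flags[_m] = False
    assert [i for i in range(2, _limit + 1) if _flags[i]] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]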
| 711 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : List[Any] = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
_lowercase : str = AutoTokenizer.from_pretrained('xlm-roberta-base' )
_lowercase : List[Any] = 'The dog is cute and lives in the garden house'
_lowercase : Optional[int] = jnp.array([tokenizer.encode(UpperCamelCase_ )] )
_lowercase : int = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
_lowercase : Tuple = jnp.array(
[[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
_lowercase : List[str] = model(UpperCamelCase_ )['last_hidden_state']
self.assertEqual(output.shape , UpperCamelCase_ )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , UpperCamelCase_ , atol=1E-3 ) )
| 4 | 0 |
'''simple docstring'''
from math import isqrt, loga
def __UpperCamelCase ( _lowercase ) -> list[int]:
_lowercase : int = [True] * max_number
for i in range(2, isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
            for j in range(i**2, max_number, i ):
_lowercase : int = False
return [i for i in range(2, a_ ) if is_prime[i]]
def __UpperCamelCase ( _lowercase = 80_0800, _lowercase = 80_0800 ) -> int:
    _lowercase : str = degree * loga(base )
    _lowercase : Tuple = int(upper_bound )
    _lowercase : int = calculate_prime_numbers(max_prime )
_lowercase : List[Any] = 0
_lowercase : Optional[Any] = 0
    _lowercase : Dict = len(prime_numbers ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F'''{solution() = }''')
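# Added recap of the mathematics (Project Euler 800): an integer is "hybrid"
# when it equals p**q * q**p for distinct primes p != q; taking log2 turns
# p**q * q**p <= base**degree into
#     q * log2(p) + p * log2(q) <= degree * log2(base),
# which is exactly the bound the two-pointer loop above counts against.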
| 712 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}


class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 4 | 0 |
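# Usage sketch for the composite config above (equivalently importable as
# transformers.InstructBlipConfig; requires a transformers install for the OPT
# text config pulled from CONFIG_MAPPING):
from transformers import InstructBlipConfig

config = InstructBlipConfig(num_query_tokens=32)
print(config.vision_config.hidden_size)           # 1408 by default
print(config.qformer_config.encoder_hidden_size)  # synced to the vision hidden size: 1408
print(config.text_config.model_type)              # "opt" when no text_config is given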
'''simple docstring'''
def kinetic_energy(mass: float, velocity: float) -> float:
    """Kinetic energy of a body: 0.5 * mass * |velocity| ** 2."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
| 713 |
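# Worked example for the helper above: a 10 kg mass at 10 m/s carries
# 0.5 * 10 * 10**2 = 500 J, and the sign of the velocity is irrelevant.
print(kinetic_energy(10, 10))   # 500.0
print(kinetic_energy(2, -10))   # 100.0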
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByTaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByTaTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        tokenizer = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
@cached_property
    def ta_base_tokenizer(self) -> ByTaTokenizer:
        return ByTaTokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByTaTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Tuple=20 , UpperCamelCase_ : Optional[int]=5 ) -> Tuple[str, list]:
'''simple docstring'''
_lowercase : Dict = []
for i in range(len(UpperCamelCase_ ) ):
try:
_lowercase : List[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_lowercase : Optional[Any] = list(filter(lambda UpperCamelCase_ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , UpperCamelCase_ ) )
_lowercase : List[Any] = list(filter(lambda UpperCamelCase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase_ ) , UpperCamelCase_ ) )
if max_length is not None and len(UpperCamelCase_ ) > max_length:
_lowercase : List[Any] = toks[:max_length]
if min_length is not None and len(UpperCamelCase_ ) < min_length and len(UpperCamelCase_ ) > 0:
while len(UpperCamelCase_ ) < min_length:
_lowercase : Tuple = toks + toks
# toks_str = [t[1] for t in toks]
_lowercase : Dict = [t[0] for t in toks]
# Ensure consistency
_lowercase : Any = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
if " " not in output_txt and len(UpperCamelCase_ ) > 1:
_lowercase : Any = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase_ )
)
if with_prefix_space:
_lowercase : Union[str, Any] = ' ' + output_txt
_lowercase : int = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
return output_txt, output_ids
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
_lowercase : List[str] = self.ta_base_tokenizer
_lowercase : Union[str, Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
_lowercase : Tuple = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Optional[int] = self.ta_base_tokenizer
_lowercase : Tuple = 'Unicode €.'
_lowercase : List[Any] = tokenizer(UpperCamelCase_ )
_lowercase : List[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
# decoding
_lowercase : List[str] = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , 'Unicode €.</s>' )
_lowercase : Any = tokenizer('e è é ê ë' )
_lowercase : Optional[int] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
# decoding
_lowercase : Tuple = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
'''simple docstring'''
_lowercase : List[Any] = self.ta_base_tokenizer
_lowercase : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_lowercase : Any = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_lowercase : Dict = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
if FRAMEWORK != "jax":
_lowercase : Optional[Any] = list(batch.input_ids.numpy()[0] )
else:
_lowercase : List[str] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def __UpperCAmelCase ( self : Optional[int] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = self.ta_base_tokenizer
_lowercase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowercase : str = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , UpperCamelCase_ )
self.assertIn('attention_mask' , UpperCamelCase_ )
self.assertNotIn('decoder_input_ids' , UpperCamelCase_ )
self.assertNotIn('decoder_attention_mask' , UpperCamelCase_ )
def __UpperCAmelCase ( self : Any ) -> int:
'''simple docstring'''
_lowercase : Tuple = self.ta_base_tokenizer
_lowercase : Optional[Any] = [
'Summary of the text.',
'Another summary.',
]
_lowercase : str = tokenizer(
text_target=UpperCamelCase_ , max_length=32 , padding='max_length' , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def __UpperCAmelCase ( self : Dict ) -> Tuple:
'''simple docstring'''
_lowercase : str = self.ta_base_tokenizer
_lowercase : str = ['A long paragraph for summarization. </s>']
_lowercase : Optional[int] = ['Summary of the text. </s>']
# fmt: off
_lowercase : Optional[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_lowercase : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_lowercase : Any = tokenizer(UpperCamelCase_ , text_target=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , batch['input_ids'][0] )
self.assertEqual(UpperCamelCase_ , batch['labels'][0] )
def __UpperCAmelCase ( self : List[str] ) -> int:
'''simple docstring'''
        tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
        tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : List[Any] = tempfile.mkdtemp()
_lowercase : Any = ' He is very happy, UNwant\u00E9d,running'
_lowercase : Union[str, Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowercase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
_lowercase : Tuple = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
shutil.rmtree(UpperCamelCase_ )
        tokenizers = self.get_tokenizers(model_max_length=42)
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : Dict = tempfile.mkdtemp()
_lowercase : List[Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_lowercase : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_lowercase : Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowercase : List[str] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
_lowercase : Dict = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_lowercase : Dict = tokenizer.__class__.from_pretrained(UpperCamelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCamelCase_ )
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
'''simple docstring'''
        tokenizer_list = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_lowercase : int = json.load(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_lowercase : Tuple = json.load(UpperCamelCase_ )
_lowercase : List[Any] = [F'''<extra_id_{i}>''' for i in range(125 )]
_lowercase : Any = added_tokens_extra_ids + [
'an_additional_special_token'
]
_lowercase : int = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_lowercase : Optional[Any] = tokenizer_class.from_pretrained(
UpperCamelCase_ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_lowercase : List[str] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=UpperCamelCase_ )]
_lowercase : Tuple = tokenizer_class.from_pretrained(
UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def __UpperCAmelCase ( self : List[str] ) -> str:
'''simple docstring'''
        tokenizer_list = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_ )
_lowercase : str = tokenizer_class.from_pretrained(UpperCamelCase_ )
self.assertTrue(tokenizer.decode([255] ) == '' )
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : str ) -> Tuple:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowercase : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
_lowercase : Tuple = tokenizer.convert_tokens_to_string(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
        tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowercase : Optional[int] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
_lowercase : Optional[int] = 0
_lowercase : int = tokenizer.convert_ids_to_tokens(
UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
for attr in attributes_list:
setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [] )
setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
| 4 | 0 |
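# The expected ids in the tests above follow ByT5's byte + 3 offset (ids 0-2
# are reserved for pad/</s>/unk). A dependency-free sketch of that convention:
def byt5_encode(text: str) -> list[int]:
    return [b + 3 for b in text.encode("utf-8")] + [1]  # append </s> (id 1)


print(byt5_encode("Unicode €."))
# [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]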
'''simple docstring'''
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count reversible numbers of the given digit length (Project Euler 145)."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result
    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    """Sum the reversible-number counts over all lengths up to max_power digits."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
| 714 |
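# Brute-force sanity check for small inputs: Project Euler 145 states there
# are 120 reversible numbers below one thousand, which solution(3) should match.
def is_reversible(n: int) -> bool:
    if n % 10 == 0:  # reversed form would have a leading zero
        return False
    total = n + int(str(n)[::-1])
    return all(digit in "13579" for digit in str(total))


print(sum(is_reversible(n) for n in range(1, 10**3)))  # 120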
'''simple docstring'''
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 4 | 0 |
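# Illustrative application of the substitution table above, as a doc-build step
# might apply it before formatting code samples (sketch only, not the actual
# doc-builder implementation):
snippet = "processor = {processor_class}.from_pretrained(checkpoint)"
for pattern, replacement in black_avoid_patterns.items():
    snippet = snippet.replace(pattern, replacement)
print(snippet)  # processor = FakeProcessorClass.from_pretrained(checkpoint)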
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(A ) , """Tatoeba directory does not exist.""" )
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCAmelCase ( self : List[Any] ) -> Any:
'''simple docstring'''
_lowercase : Optional[Any] = tempfile.mkdtemp()
return TatoebaConverter(save_dir=UpperCamelCase_ )
@slow
def __UpperCAmelCase ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
self.resolver.convert_models(['heb-eng'] )
@slow
def __UpperCAmelCase ( self : List[str] ) -> int:
'''simple docstring'''
_lowercase , _lowercase : str = self.resolver.write_model_card('opus-mt-he-en' , dry_run=UpperCamelCase_ )
assert mmeta["long_pair"] == "heb-eng"
| 715 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate the entropy of the softmax implied by a pre-softmax logit tensor."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]
    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x
    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])
def __UpperCAmelCase ( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[Any]=None , ) -> Optional[int]:
'''simple docstring'''
_lowercase : int = ()
_lowercase : List[Any] = ()
_lowercase : Tuple = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_lowercase : Optional[int] = all_hidden_states + (hidden_states,)
_lowercase : str = layer_module(
UpperCamelCase_ , UpperCamelCase_ , head_mask[i] , UpperCamelCase_ , UpperCamelCase_ )
_lowercase : List[str] = layer_outputs[0]
if self.output_attentions:
_lowercase : Tuple = all_attentions + (layer_outputs[1],)
_lowercase : Optional[int] = (hidden_states,)
if self.output_hidden_states:
_lowercase : str = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowercase : Optional[int] = current_outputs + (all_attentions,)
_lowercase : List[Any] = self.highway[i](UpperCamelCase_ )
# logits, pooled_output
if not self.training:
_lowercase : Dict = highway_exit[0]
_lowercase : Tuple = entropy(UpperCamelCase_ )
_lowercase : Dict = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowercase : str = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
_lowercase : Tuple = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(UpperCamelCase_ , i + 1 )
else:
_lowercase : Optional[int] = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowercase : str = all_hidden_states + (hidden_states,)
_lowercase : Optional[Any] = (hidden_states,)
if self.output_hidden_states:
_lowercase : Dict = outputs + (all_hidden_states,)
if self.output_attentions:
_lowercase : Optional[Any] = outputs + (all_attentions,)
_lowercase : Optional[int] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
'''simple docstring'''
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()
    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : int=None , UpperCamelCase_ : Tuple=None , ) -> Union[str, Any]:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
_lowercase : Any = input_ids.size()
elif inputs_embeds is not None:
_lowercase : Any = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
_lowercase : str = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowercase : Tuple = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
if encoder_attention_mask is None:
_lowercase : Dict = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
if token_type_ids is None:
_lowercase : int = torch.zeros(UpperCamelCase_ , dtype=torch.long , device=UpperCamelCase_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowercase : torch.Tensor = self.get_extended_attention_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowercase : int = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowercase : int = encoder_attention_mask[:, None, None, :]
_lowercase : str = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
_lowercase : Optional[int] = (1.0 - encoder_extended_attention_mask) * -1_00_00.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowercase : Optional[int] = self.get_head_mask(UpperCamelCase_ , self.config.num_hidden_layers )
_lowercase : Dict = self.embeddings(
input_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ )
_lowercase : List[Any] = self.encoder(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )
_lowercase : int = encoder_outputs[0]
_lowercase : str = self.pooler(UpperCamelCase_ )
_lowercase : List[Any] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    """A module providing a shortcut from the output of a non-final BertLayer to the classification head."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output
        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
'''simple docstring'''
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : str=-1 , UpperCamelCase_ : Union[str, Any]=False , ) -> Tuple:
'''simple docstring'''
_lowercase : Union[str, Any] = self.num_layers
try:
_lowercase : Tuple = self.bert(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_lowercase : List[Any] = outputs[1]
_lowercase : int = self.dropout(UpperCamelCase_ )
_lowercase : Optional[int] = self.classifier(UpperCamelCase_ )
_lowercase : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowercase : Union[str, Any] = e.message
_lowercase : Any = e.exit_layer
_lowercase : Optional[int] = outputs[0]
if not self.training:
_lowercase : Union[str, Any] = entropy(UpperCamelCase_ )
_lowercase : Tuple = []
_lowercase : Tuple = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowercase : Tuple = MSELoss()
_lowercase : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_lowercase : Union[str, Any] = CrossEntropyLoss()
_lowercase : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_lowercase : Optional[Any] = []
for highway_exit in outputs[-1]:
_lowercase : Optional[Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(UpperCamelCase_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_lowercase : Union[str, Any] = MSELoss()
_lowercase : Any = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_lowercase : Dict = CrossEntropyLoss()
_lowercase : Optional[int] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(UpperCamelCase_ )
if train_highway:
_lowercase : List[str] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_lowercase : Optional[Any] = (loss,) + outputs
if not self.training:
_lowercase : List[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowercase : Dict = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 4 | 0 |
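# Quick numeric check of the entropy() helper above, written self-contained:
# for uniform logits the softmax entropy equals log(num_classes).
import math

import torch

logits = torch.zeros(1, 4)  # four equally likely classes
exp_x = torch.exp(logits)
A = torch.sum(exp_x, dim=1)
B = torch.sum(logits * exp_x, dim=1)
print(torch.log(A) - B / A)  # tensor([1.3863])
print(math.log(4))           # 1.3862943611198906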
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )

    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        return sample

    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )

    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                " for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
| 716 |
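# Minimal sketch of driving the functional scheduler API above (upstream name
# diffusers.FlaxDDPMScheduler; requires jax and flax installed; the printed
# timesteps follow from arange(0, 50) * 20 reversed):
from diffusers import FlaxDDPMScheduler

scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
state = scheduler.create_state()
state = scheduler.set_timesteps(state, num_inference_steps=50)
print(state.timesteps[:5])  # [980 960 940 920 900]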
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class TestClass(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same."
        )


if __name__ == "__main__":
    unittest.main()
| 4 | 0 |
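# A self-contained sketch of the greedy fractional-knapsack profit the tests
# above exercise (assumed to mirror knapsack.greedy_knapsack.calc_profit):
def calc_profit(profit: list[int], weight: list[int], max_weight: int) -> float:
    by_ratio = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total, capacity = 0.0, max_weight
    for p, w in by_ratio:
        take = min(w, capacity)          # take the whole item, or the remaining fraction
        total += p * (take / w)
        capacity -= take
        if capacity == 0:
            break
    return total


# All six items fit within max_weight=100 (total weight 42), so the answer is
# the full profit sum, matching the 210 asserted in test_sorted:
print(calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100))  # 210.0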
'''simple docstring'''
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt",
        padding="max_length",
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder,
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / "text_encoder" / "model.onnx",
        ordered_input_names=["input_ids"],
        output_names=["last_hidden_state", "pooler_output"],
        dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        },
        opset=opset,
    )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
        output_names=["out_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        },
        opset=opset,
        use_external_data_format=True,  # UNet weights exceed the 2GB protobuf limit
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet,
        unet_model_path,
        save_as_external_data=True,
        all_tensors_to_one_file=True,
        location="weights.pb",
        convert_attribute=False,
    )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_encoder" / "model.onnx",
        ordered_input_names=["sample", "return_dict"],
        output_names=["latent_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(1, clip_num_channels, clip_image_size, clip_image_size).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / "safety_checker" / "model.onnx",
            ordered_input_names=["clip_input", "images"],
            output_names=["out_images", "has_nsfw_concepts"],
            dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )
    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()

    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
| 717 |
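# Typical invocation of the converter above (paths and model id illustrative;
# the script ships in diffusers as scripts/convert_stable_diffusion_checkpoint_to_onnx.py):
#
#   python convert_stable_diffusion_checkpoint_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 \
#       --output_path ./sd_onnx \
#       --opset 14 \
#       --fp16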
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 4 | 0 |
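# The _LazyModule registered above defers the heavy torch/tf imports until the
# first attribute access; a sketch of the observable behavior (assumes a
# transformers install):
import transformers

config = transformers.XLNetConfig()  # the modeling/config module is imported lazily here
print(config.model_type)  # "xlnet"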
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : str , UpperCamelCase_ : Any=12_8112 , UpperCamelCase_ : Dict=1024 , UpperCamelCase_ : str=12 , UpperCamelCase_ : List[Any]=4096 , UpperCamelCase_ : Any=16 , UpperCamelCase_ : Dict=12 , UpperCamelCase_ : Dict=4096 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : List[str]=0.05 , UpperCamelCase_ : Optional[int]=0.05 , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : List[str]="relu" , UpperCamelCase_ : Union[str, Any]=1024 , UpperCamelCase_ : Optional[Any]=0.1 , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : Optional[int]=0.0 , UpperCamelCase_ : List[Any]=0.02 , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : List[Any]="float32" , UpperCamelCase_ : Optional[Any]=False , UpperCamelCase_ : int=128 , UpperCamelCase_ : List[str]=64 , UpperCamelCase_ : int=4 , UpperCamelCase_ : List[Any]=4 , UpperCamelCase_ : Tuple=0.0_01 , UpperCamelCase_ : Dict=0.0_01 , UpperCamelCase_ : List[Any]="all" , UpperCamelCase_ : Dict=False , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Optional[Any]=1.0 , UpperCamelCase_ : List[Any]=0.2 , UpperCamelCase_ : str=1 , UpperCamelCase_ : Union[str, Any]=0 , UpperCamelCase_ : Optional[Any]=2 , UpperCamelCase_ : Optional[Any]=False , **UpperCamelCase_ : Dict , ) -> Any:
'''simple docstring'''
_lowercase : List[Any] = vocab_size
_lowercase : int = max_position_embeddings
_lowercase : Dict = d_model
_lowercase : Optional[int] = encoder_ffn_dim
_lowercase : int = encoder_layers
_lowercase : Union[str, Any] = encoder_attention_heads
_lowercase : List[Any] = decoder_ffn_dim
_lowercase : Tuple = decoder_layers
_lowercase : int = decoder_attention_heads
_lowercase : List[str] = dropout
_lowercase : List[Any] = attention_dropout
_lowercase : Dict = activation_dropout
_lowercase : Any = activation_function
_lowercase : Dict = init_std
_lowercase : int = encoder_layerdrop
_lowercase : str = decoder_layerdrop
_lowercase : Optional[Any] = use_cache
_lowercase : List[str] = encoder_layers
_lowercase : Any = scale_embedding # scale factor will be sqrt(d_model) if True
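        # Mixture-of-Experts routing hyperparameters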
_lowercase : List[str] = router_z_loss_coef
_lowercase : int = router_aux_loss_coef
_lowercase : List[Any] = decoder_sparse_step
_lowercase : List[Any] = encoder_sparse_step
_lowercase : List[str] = num_experts
_lowercase : Any = expert_capacity
_lowercase : Dict = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
_lowercase : int = router_dtype
_lowercase : Optional[Any] = router_ignore_padding_tokens
_lowercase : Optional[Any] = batch_prioritized_routing
_lowercase : List[str] = second_expert_policy
_lowercase : int = normalize_router_prob_before_dropping
_lowercase : List[Any] = moe_eval_capacity_token_fraction
_lowercase : int = moe_token_dropout
_lowercase : List[str] = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs, )
| 718 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Optional[Any] =logging.get_logger(__name__)
_A : Optional[int] ={
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
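# MarkupLM configuration: a BERT-style encoder extended with XPath tag/subscript
# embeddings so the model can encode the tree position of each HTML node.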
class lowerCamelCase__ ( PretrainedConfig ):
'''simple docstring'''
A_ = """markuplm"""
    def __init__( self, vocab_size=3_0522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=0, bos_token_id=0, eos_token_id=2, max_xpath_tag_unit_embeddings=256, max_xpath_subs_unit_embeddings=1024, tag_pad_id=216, subs_pad_id=1001, xpath_unit_hidden_size=32, max_depth=50, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs ):
'''simple docstring'''
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, )
_lowercase : List[Any] = vocab_size
_lowercase : Union[str, Any] = hidden_size
_lowercase : Dict = num_hidden_layers
_lowercase : Optional[Any] = num_attention_heads
_lowercase : Dict = hidden_act
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[int] = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : List[Any] = type_vocab_size
_lowercase : Union[str, Any] = initializer_range
_lowercase : Optional[int] = layer_norm_eps
_lowercase : Optional[Any] = position_embedding_type
_lowercase : str = use_cache
_lowercase : str = classifier_dropout
# additional properties
_lowercase : int = max_depth
_lowercase : Dict = max_xpath_tag_unit_embeddings
_lowercase : str = max_xpath_subs_unit_embeddings
_lowercase : List[str] = tag_pad_id
_lowercase : Optional[int] = subs_pad_id
_lowercase : Any = xpath_unit_hidden_size
| 4 | 0 |
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), '''src''')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
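# torch.hub entry points: thin wrappers that forward straight to the Auto* factories.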
@add_start_docstrings(AutoConfig.__doc__ )
def config( *args, **kwargs ):
    return AutoConfig.from_pretrained(*args, **kwargs )
@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer( *args, **kwargs ):
    return AutoTokenizer.from_pretrained(*args, **kwargs )
@add_start_docstrings(AutoModel.__doc__ )
def model( *args, **kwargs ):
    return AutoModel.from_pretrained(*args, **kwargs )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM( *args, **kwargs ):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM( *args, **kwargs ):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification( *args, **kwargs ):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering( *args, **kwargs ):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs )
| 719 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def get_config(checkpoint_url ):
    config = SwinaSRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = 'pixelshuffle_aux'
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = 'pixelshuffledirect'
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = 'nearest+conv'
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 2_5_5.0
        config.upsampler = ''
    return config
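# Translate a single parameter name from the original Swin2SR repo to the HF layout.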
def rename_key(name, config ) -> str:
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.patch_embeddings.layernorm' )
    if "layers" in name:
        name = name.replace('layers', 'encoder.stages' )
    if "residual_group.blocks" in name:
        name = name.replace('residual_group.blocks', 'layers' )
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn', 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense' )
    if "q_bias" in name:
        name = name.replace('q_bias', 'query.bias' )
    if "k_bias" in name:
        name = name.replace('k_bias', 'key.bias' )
    if "v_bias" in name:
        name = name.replace('v_bias', 'value.bias' )
    if "cpb_mlp" in name:
        name = name.replace('cpb_mlp', 'continuous_position_bias_mlp' )
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'patch_embed.projection' )
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "conv_first" in name:
        name = name.replace('conv_first', 'first_convolution' )
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace('conv_last', 'final_convolution' )
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace('conv_before_upsample.0', 'conv_before_upsample' )
            if "upsample.0" in name:
                name = name.replace('upsample.0', 'upsample.convolution_0' )
            if "upsample.2" in name:
                name = name.replace('upsample.2', 'upsample.convolution_1' )
            name = 'upsample.' + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace('upsample.0.weight', 'upsample.conv.weight' )
            name = name.replace('upsample.0.bias', 'upsample.conv.bias' )
    else:
        name = 'swin2sr.' + name
    return name
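# Rewrite the full state dict, splitting fused qkv tensors into query/key/value.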
def convert_state_dict(orig_state_dict, config ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('.' )
            stage_num = int(key_split[1] )
            block_num = int(key_split[4] )
            dim = config.embed_dim
            if "weight" in key:
                orig_state_dict[f'''swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight'''] = val[:dim, :]
                orig_state_dict[f'''swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight'''] = val[dim : dim * 2, :]
                orig_state_dict[f'''swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight'''] = val[-dim:, :]
            else:
                orig_state_dict[f'''swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias'''] = val[:dim]
                orig_state_dict[f'''swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias'''] = val[dim : dim * 2]
                orig_state_dict[f'''swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias'''] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config )] = val
    return orig_state_dict
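# End-to-end conversion: build the config, remap the weights, and verify the model
# output on a test image before optionally saving or pushing to the hub.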
def convert_swinasr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub ):
    config = get_config(checkpoint_url )
    model = SwinaSRForImageSuperResolution(config )
    model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu' )
    new_state_dict = convert_state_dict(state_dict, config )
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False )
    if len(missing_keys ) > 0:
        raise ValueError('Missing keys when converting: {}'.format(missing_keys ) )
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f'''Unexpected key {key} in state_dict''' )
    # verify values
    url = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
    image = Image.open(requests.get(url, stream=True ).raw ).convert('RGB' )
    processor = SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    image_size = 126 if 'Jpeg' in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size) ),
            ToTensor(),
            Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6], std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
        ] )
    pixel_values = transforms(image ).unsqueeze(0 )
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1 )
    outputs = model(pixel_values )
    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512] )
        expected_slice = torch.tensor(
            [[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512] )
        expected_slice = torch.tensor(
            [[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] )
    assert (
        outputs.reconstruction.shape == expected_shape
    ), f'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'''
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1E-3 )
    print('Looks ok!' )
    url_to_name = {
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
            'swin2SR-classical-sr-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
            'swin2SR-classical-sr-x4-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
            'swin2SR-compressed-sr-x4-48'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
            'swin2SR-lightweight-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
            'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
        ),
    }
    model_name = url_to_name[checkpoint_url]
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub(f'''caidas/{model_name}''' )
        processor.push_to_hub(f'''caidas/{model_name}''' )
if __name__ == "__main__":
_A : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
_A : int =parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 4 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
_A : Optional[int] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 720 |
'''simple docstring'''
def __UpperCamelCase ( word, max_width ) -> list:
    words = word.split()
    def justify(line, width, max_width ) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line )
        if len(line ) == 1:
            # if there is only one word in the line
            # just insert overall_spaces_count for the remainder of the line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations ):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words ):
                # add the word
                aligned_words_list.append(line[i] )
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * ' ' )
            # just add the last word to the sentence
            aligned_words_list.append(line[-1] )
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list )
    answer = []
    line = []
    width = 0
    for word in words:
        if width + len(word ) + len(line ) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word )
            width += len(word )
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width ) )
            # reset new line and new width
            line, width = [word], len(word )
    remaining_spaces = max_width - width - len(line )
    answer.append(' '.join(line ) + (remaining_spaces + 1) * ' ' )
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
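    # Illustrative run (hypothetical sentence and width; the expected output below
    # follows from the justification rules implemented above):
    # ['The  quick brown', 'fox  jumps  over', 'the lazy dog    ']
    print(__UpperCamelCase('The quick brown fox jumps over the lazy dog', 16 ) )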
| 4 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = 'https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'
    image = Image.open(requests.get(url, stream=True ).raw ).convert('RGB' )
    return image
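# Build the (original_name, hf_name) pairs used to rename vision/Q-Former weights.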
def create_rename_keys(config ):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new ):
    val = dct.pop(old )
    dct[new] = val
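# Fold the checkpoint's separate q/v biases into the fused qkv bias the HF model expects.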
def read_in_q_v_bias(state_dict, config ):
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False ), v_bias) )
        state_dict[f'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
def get_blipa_config(model_name ):
    image_size = 364 if 'coco' in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xl', dense_act_fn='gelu', bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xxl', dense_act_fn='gelu', bos_token_id=1 ).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained('decapoda-research/llama-7b-hf', vocab_size=3_2001 ).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained('decapoda-research/llama-13b-hf', vocab_size=3_2001 ).to_dict()
    else:
        raise ValueError('Model name not supported' )
    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=3_0523 ).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config )
    return config, image_size
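# Conversion entry point: load the LAVIS model, remap its weights into the HF
# architecture, then compare logits and generations between both implementations.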
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False ):
_lowercase : List[Any] = AutoTokenizer.from_pretrained('bert-base-uncased', truncation_side='left' )
qformer_tokenizer.add_special_tokens({'bos_token': '[DEC]'} )
if "t5" in model_name:
_lowercase : List[str] = TaTokenizerFast.from_pretrained('google/flan-t5-xl', truncation_side='left' )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
_lowercase : List[Any] = LlamaTokenizerFast.from_pretrained(
'huggyllama/llama-7b', truncation_side='left', bos_token='</s>', unk_token='</s>' )
tokenizer.add_special_tokens({'pad_token': '[PAD]'} )
_lowercase , _lowercase : int = get_blipa_config(_lowercase )
_lowercase : List[Any] = InstructBlipForConditionalGeneration(_lowercase ).eval()
_lowercase : Dict = {
'instructblip-vicuna-7b': ('blip2_vicuna_instruct', 'vicuna7b'),
'instructblip-vicuna-13b': ('blip2_vicuna_instruct', 'vicuna13b'),
'instructblip-flan-t5-xl': ('blip2_t5_instruct', 'flant5xl'),
'instructblip-flan-t5-xxl': ('blip2_t5_instruct', 'flant5xxl'),
}
_lowercase , _lowercase : Dict = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
_lowercase : Optional[int] = 'cuda:1' if torch.cuda.is_available() else 'cpu'
_lowercase : str = 'cuda:2' if torch.cuda.is_available() else 'cpu'
_lowercase , _lowercase , _lowercase : Any = load_model_and_preprocess(
name=_lowercase, model_type=_lowercase, is_eval=_lowercase, device=_lowercase )
original_model.eval()
print('Done!' )
# update state dict keys
_lowercase : Optional[int] = original_model.state_dict()
_lowercase : Optional[int] = create_rename_keys(_lowercase )
for src, dest in rename_keys:
rename_key(_lowercase, _lowercase, _lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_lowercase : List[Any] = state_dict.pop(_lowercase )
if key.startswith('Qformer.bert' ):
_lowercase : List[Any] = key.replace('Qformer.bert', 'qformer' )
if "attention.self" in key:
_lowercase : Tuple = key.replace('self', 'attention' )
if "llm_proj" in key:
_lowercase : int = key.replace('llm_proj', 'language_projection' )
if "t5_proj" in key:
_lowercase : List[str] = key.replace('t5_proj', 'language_projection' )
if key.startswith('llm_model' ):
_lowercase : List[Any] = key.replace('llm_model', 'language_model' )
if key.startswith('t5' ):
_lowercase : Union[str, Any] = key.replace('t5', 'language' )
_lowercase : Tuple = val
# read in qv biases
read_in_q_v_bias(_lowercase, _lowercase )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(_lowercase, strict=_lowercase )
_lowercase : Any = load_demo_image()
_lowercase : Tuple = 'What is unusual about this image?'
# create processor
_lowercase : Union[str, Any] = BlipImageProcessor(
size={'height': image_size, 'width': image_size}, image_mean=_lowercase, image_std=_lowercase )
_lowercase : Union[str, Any] = InstructBlipProcessor(
image_processor=_lowercase, tokenizer=_lowercase, qformer_tokenizer=_lowercase, )
_lowercase : Tuple = processor(images=_lowercase, text=_lowercase, return_tensors='pt' ).to(_lowercase )
# make sure processor creates exact same pixel values
_lowercase : int = vis_processors['eval'](_lowercase ).unsqueeze(0 ).to(_lowercase )
_lowercase : Dict = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ), _lowercase )
original_model.to(_lowercase )
hf_model.to(_lowercase )
with torch.no_grad():
if "vicuna" in model_name:
_lowercase : Optional[int] = original_model({'image': original_pixel_values, 'text_input': [prompt]} ).logits
_lowercase : List[Any] = hf_model(**_lowercase ).logits
else:
_lowercase : str = original_model(
{'image': original_pixel_values, 'text_input': [prompt], 'text_output': ['\n']} ).logits
_lowercase : str = tokenizer('\n', return_tensors='pt' ).input_ids.to(_lowercase )
_lowercase : int = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100 )
_lowercase : Any = hf_model(**_lowercase, labels=_lowercase ).logits
print('First values of original logits:', original_logits[0, :3, :3] )
print('First values of HF logits:', logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
_lowercase : Optional[int] = 1E-4 if 'vicuna' in model_name else 1E-5
assert torch.allclose(original_logits.to(logits.device ), _lowercase, atol=_lowercase )
print('Looks ok!' )
print('Generating with original model...' )
_lowercase : str = original_model.generate({'image': original_pixel_values, 'prompt': prompt}, num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print('Generating with HF model...' )
_lowercase : Tuple = hf_model.generate(
**_lowercase, do_sample=_lowercase, num_beams=5, max_length=256, min_length=1, top_p=0.9, repetition_penalty=1.5, length_penalty=1.0, temperature=1, )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
_lowercase : int = 2
print('Original generation:', _lowercase )
_lowercase : Any = processor.batch_decode(_lowercase, skip_special_tokens=_lowercase )
_lowercase : str = [text.strip() for text in output_text]
print('HF generation:', _lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowercase )
hf_model.save_pretrained(_lowercase )
if push_to_hub:
processor.push_to_hub(f'''Salesforce/{model_name}''' )
hf_model.push_to_hub(f'''Salesforce/{model_name}''' )
if __name__ == "__main__":
_A : List[str] =argparse.ArgumentParser()
_A : Optional[int] =[
'''instructblip-vicuna-7b''',
'''instructblip-vicuna-13b''',
'''instructblip-flan-t5-xl''',
'''instructblip-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''instructblip-flan-t5-xl''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
_A : Any =parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 721 |
'''simple docstring'''
import os
from collections.abc import Iterator
def __UpperCamelCase ( _lowercase = "." ) -> Iterator[str]:
for dir_path, dir_names, filenames in os.walk(_lowercase ):
_lowercase : Optional[int] = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(_lowercase )[1] in (".py", ".ipynb"):
yield os.path.join(_lowercase, _lowercase ).lstrip('./' )
def __UpperCamelCase ( _lowercase ) -> List[str]:
return f'''{i * " "}*''' if i else "\n##"
def __UpperCamelCase ( _lowercase, _lowercase ) -> str:
_lowercase : Optional[Any] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(_lowercase ) or old_parts[i] != new_part) and new_part:
print(f'''{md_prefix(_lowercase )} {new_part.replace("_", " " ).title()}''' )
return new_path
def __UpperCamelCase ( _lowercase = "." ) -> None:
_lowercase : Dict = ''
for filepath in sorted(good_file_paths(_lowercase ) ):
_lowercase , _lowercase : Optional[Any] = os.path.split(_lowercase )
if filepath != old_path:
_lowercase : Dict = print_path(_lowercase, _lowercase )
_lowercase : Optional[int] = (filepath.count(os.sep ) + 1) if filepath else 0
_lowercase : Dict = f'''{filepath}/{filename}'''.replace(' ', '%20' )
_lowercase : Optional[int] = os.path.splitext(filename.replace('_', ' ' ).title() )[0]
print(f'''{md_prefix(_lowercase )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md('''.''')
| 4 | 0 |
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
_A : Union[str, Any] =logging.getLogger(__name__)
require_version('''pytorch_lightning>=1.0.4''')
MODEL_MODES = {
'''base''': AutoModel,
'''sequence-classification''': AutoModelForSequenceClassification,
'''question-answering''': AutoModelForQuestionAnswering,
'''pretraining''': AutoModelForPreTraining,
'''token-classification''': AutoModelForTokenClassification,
'''language-modeling''': AutoModelWithLMHead,
'''summarization''': AutoModelForSeqaSeqLM,
'''translation''': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = '''{''' + ''', '''.join(arg_to_scheduler_choices) + '''}'''
class lowerCamelCase__ ( pl.LightningModule ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase_ : argparse.Namespace , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : List[str]="base" , UpperCamelCase_ : int=None , UpperCamelCase_ : int=None , UpperCamelCase_ : Optional[int]=None , **UpperCamelCase_ : Tuple , ) -> Optional[int]:
'''simple docstring'''
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(UpperCamelCase_ )
_lowercase : List[Any] = 0
_lowercase : Any = Path(self.hparams.output_dir )
_lowercase : str = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
_lowercase : List[Any] = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=UpperCamelCase_ , **UpperCamelCase_ , )
else:
_lowercase : PretrainedConfig = config
_lowercase : Dict = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams , UpperCamelCase_ , UpperCamelCase_ ):
assert hasattr(self.config , UpperCamelCase_ ), F'''model config doesn\'t have a `{p}` attribute'''
setattr(self.config , UpperCamelCase_ , getattr(self.hparams , UpperCamelCase_ ) )
if tokenizer is None:
_lowercase : Union[str, Any] = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=UpperCamelCase_ , )
else:
_lowercase : PreTrainedTokenizer = tokenizer
_lowercase : Any = MODEL_MODES[mode]
if model is None:
_lowercase : Union[str, Any] = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=UpperCamelCase_ , )
else:
_lowercase : str = model
def __UpperCAmelCase ( self : Union[str, Any] , *UpperCamelCase_ : int , **UpperCamelCase_ : str ) -> Optional[int]:
'''simple docstring'''
_lowercase : List[str] = self.model_type.from_pretrained(*UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : Tuple ) -> str:
'''simple docstring'''
_lowercase : Optional[Any] = arg_to_scheduler[self.hparams.lr_scheduler]
_lowercase : List[str] = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
_lowercase : Optional[int] = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def __UpperCAmelCase ( self : Any ) -> int:
'''simple docstring'''
_lowercase : Tuple = self.model
_lowercase : int = ['bias', 'LayerNorm.weight']
_lowercase : int = [
{
'params': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ], # check these named parameters
'weight_decay': self.hparams.weight_decay,
},
{
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
if self.hparams.adafactor:
_lowercase : Dict = Adafactor(
UpperCamelCase_ , lr=self.hparams.learning_rate , scale_parameter=UpperCamelCase_ , relative_step=UpperCamelCase_ )
else:
_lowercase : str = AdamW(
UpperCamelCase_ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
_lowercase : Any = optimizer
_lowercase : str = self.get_lr_scheduler()
return [optimizer], [scheduler]
def __UpperCAmelCase ( self : str , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int] ) -> int:
'''simple docstring'''
return self.validation_step(UpperCamelCase_ , UpperCamelCase_ )
def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : str ) -> str:
'''simple docstring'''
return self.validation_end(UpperCamelCase_ )
def __UpperCAmelCase ( self : Tuple ) -> int:
'''simple docstring'''
_lowercase : Union[str, Any] = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
_lowercase : Dict = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : List[Any] ) -> str:
'''simple docstring'''
if stage == "test":
_lowercase : str = len(self.test_dataloader().dataset )
else:
_lowercase : Union[str, Any] = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=UpperCamelCase_ )
_lowercase : str = len(self.train_dataloader().dataset )
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : bool = False ) -> List[Any]:
'''simple docstring'''
raise NotImplementedError('You must implement this for your task' )
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
return self.train_loader
def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Tuple ) -> List[str]:
'''simple docstring'''
return os.path.join(
self.hparams.data_dir , 'cached_{}_{}_{}'.format(
UpperCamelCase_ , list(filter(UpperCamelCase_ , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : Dict[str, Any] ) -> None:
'''simple docstring'''
_lowercase : Tuple = self.output_dir.joinpath('best_tfmr' )
_lowercase : List[str] = self.step_count
self.model.save_pretrained(UpperCamelCase_ )
self.tokenizer.save_pretrained(UpperCamelCase_ )
@staticmethod
def __UpperCAmelCase ( UpperCamelCase_ : int , UpperCamelCase_ : List[Any] ) -> Optional[int]:
'''simple docstring'''
parser.add_argument(
'--model_name_or_path' , default=UpperCamelCase_ , type=UpperCamelCase_ , required=UpperCamelCase_ , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--config_name' , default='' , type=UpperCamelCase_ , help='Pretrained config name or path if not the same as model_name' )
parser.add_argument(
'--tokenizer_name' , default=UpperCamelCase_ , type=UpperCamelCase_ , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument(
'--cache_dir' , default=str(Path(UpperCamelCase_ ).parent / 'test_run' / 'cache' ) , type=UpperCamelCase_ , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
parser.add_argument(
'--encoder_layerdrop' , type=UpperCamelCase_ , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--decoder_layerdrop' , type=UpperCamelCase_ , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--dropout' , type=UpperCamelCase_ , help='Dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--attention_dropout' , type=UpperCamelCase_ , help='Attention dropout probability (Optional). Goes into model.config' , )
parser.add_argument('--learning_rate' , default=5E-5 , type=UpperCamelCase_ , help='The initial learning rate for Adam.' )
parser.add_argument(
            '--lr_scheduler' , default='linear' , choices=arg_to_scheduler_choices , metavar=arg_to_scheduler_metavar , type=str , help='Learning rate scheduler' , )
parser.add_argument('--weight_decay' , default=0.0 , type=UpperCamelCase_ , help='Weight decay if we apply some.' )
parser.add_argument('--adam_epsilon' , default=1E-8 , type=UpperCamelCase_ , help='Epsilon for Adam optimizer.' )
parser.add_argument('--warmup_steps' , default=0 , type=UpperCamelCase_ , help='Linear warmup over warmup_steps.' )
parser.add_argument('--num_workers' , default=4 , type=UpperCamelCase_ , help='kwarg passed to DataLoader' )
parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=UpperCamelCase_ )
parser.add_argument('--train_batch_size' , default=32 , type=UpperCamelCase_ )
parser.add_argument('--eval_batch_size' , default=32 , type=UpperCamelCase_ )
parser.add_argument('--adafactor' , action='store_true' )
class InitCallback( pl.Callback ):
'''simple docstring'''
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any] ) -> str:
'''simple docstring'''
if (
trainer.is_global_zero and trainer.global_rank == 0
        ): # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning versions, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class CheckParamCallback( pl.Callback ):
'''simple docstring'''
def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(UpperCamelCase_ )
class LoggingCallback( pl.Callback ):
'''simple docstring'''
def __UpperCAmelCase ( self : int , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str] ) -> List[str]:
'''simple docstring'''
_lowercase : List[str] = trainer.lr_schedulers[0]['scheduler']
_lowercase : Union[str, Any] = {F'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(UpperCamelCase_ )
def __UpperCAmelCase ( self : str , UpperCamelCase_ : pl.Trainer , UpperCamelCase_ : pl.LightningModule ) -> Any:
'''simple docstring'''
rank_zero_info('***** Validation results *****' )
_lowercase : Optional[int] = trainer.callback_metrics
# Log results
for key in sorted(UpperCamelCase_ ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(UpperCamelCase_ , str(metrics[key] ) ) )
def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : pl.Trainer , UpperCamelCase_ : pl.LightningModule ) -> List[str]:
'''simple docstring'''
rank_zero_info('***** Test results *****' )
_lowercase : int = trainer.callback_metrics
# Log and save results to file
_lowercase : List[Any] = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' )
with open(UpperCamelCase_ , 'w' ) as writer:
for key in sorted(UpperCamelCase_ ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(UpperCamelCase_ , str(metrics[key] ) ) )
writer.write('{} = {}\n'.format(UpperCamelCase_ , str(metrics[key] ) ) )
def add_generic_args(parser, root_dir ) -> None:
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        '--output_dir', default=str(Path(root_dir ).parent / 'test_run' / 'model_checkpoints' ), type=str, help='The output directory where the model predictions and checkpoints will be written.', )
    parser.add_argument(
        '--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit', )
    parser.add_argument(
        '--fp16_opt_level', type=str, default='O2', help=(
            'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
            'See details at https://nvidia.github.io/apex/amp.html'
        ), )
    parser.add_argument('--n_tpu_cores', dest='tpu_cores', type=int )
    parser.add_argument('--max_grad_norm', dest='gradient_clip_val', default=1.0, type=float, help='Max gradient norm' )
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.' )
    parser.add_argument('--do_predict', action='store_true', help='Whether to run predictions on the test set.' )
    parser.add_argument(
        '--gradient_accumulation_steps', dest='accumulate_grad_batches', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.', )
    parser.add_argument('--seed', type=int, default=42, help='random seed for initialization' )
    parser.add_argument(
        '--data_dir', default=str(Path(root_dir ).parent / 'test_run' / 'dummy-train-data' ), type=str, help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.', )
def generic_train(model, args, early_stopping_callback=None, logger=True, extra_callbacks=[], checkpoint_callback=None, logging_callback=None, **extra_train_kwargs ):
    pl.seed_everything(args.seed )
    # init model
    odir = Path(model.hparams.output_dir )
    odir.mkdir(exist_ok=True )
    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix='checkpoint', monitor='val_loss', mode='min', save_top_k=1 )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback )
    if logging_callback is None:
        logging_callback = LoggingCallback()
    train_params = {}
    if args.fp16:
        train_params['precision'] = 16
    if args.gpus > 1:
        train_params['accelerator'] = 'auto'
        train_params['strategy'] = 'ddp'
    train_params['accumulate_grad_batches'] = args.accumulate_grad_batches
    train_params['profiler'] = None
    train_params['devices'] = 'auto'
    trainer = pl.Trainer.from_argparse_args(
        args, weights_summary=None, callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback], logger=logger, val_check_interval=1, num_sanity_val_steps=2, **train_params, )
    if args.do_train:
        trainer.fit(model )
    else:
        print('RAG modeling tests with new set functions successfully executed!' )
    return trainer
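# Illustrative wiring of the two helpers above (sketch only; `MyTransformer` stands
# in for a task-specific subclass of the LightningModule wrapper defined earlier):
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())
#   args = parser.parse_args()
#   model = MyTransformer(args)
#   trainer = generic_train(model, args)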
| 700 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_reformer'''] = ['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_reformer_fast'''] = ['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_reformer'''] = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
_A : Union[str, Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 | 0 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class lowerCamelCase__ ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
A_ = StableDiffusionControlNetImgaImgPipeline
A_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
A_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A_ = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"""control_image"""} )
A_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        controlnet = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'controlnet': controlnet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
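    # The dummy inputs below pair a random 64x64 RGB image with a matching random
    # control image, plus a short prompt and a fixed-seed generator.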
    def __UpperCAmelCase ( self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , )
        image = floats_tensor(control_image.shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((64, 64) )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'image': image,
            'control_image': control_image,
        }
        return inputs
def __UpperCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __UpperCAmelCase ( self : Dict ) -> Any:
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class lowerCamelCase__ ( PipelineTesterMixin , PipelineKarrasSchedulerTesterMixin , unittest.TestCase ):
'''simple docstring'''
A_ = StableDiffusionControlNetImgaImgPipeline
A_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
A_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A_ = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        def init_weights(m ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal(m.weight )
                m.bias.data.fill_(1.0 )
        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet1.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet2.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        controlnet = MultiControlNetModel([controlnet1, controlnet2] )
        components = {
            'unet': unet,
            'controlnet': controlnet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def __UpperCAmelCase ( self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
        ]
        image = floats_tensor(control_image[0].shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((64, 64) )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'image': image,
            'control_image': control_image,
        }
        return inputs
def __UpperCAmelCase ( self : Tuple ) -> str:
'''simple docstring'''
_lowercase : Dict = self.get_dummy_components()
_lowercase : List[str] = self.pipeline_class(**UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
_lowercase : Tuple = 10.0
_lowercase : List[Any] = 4
_lowercase : Any = self.get_dummy_inputs(UpperCamelCase_ )
_lowercase : Union[str, Any] = steps
_lowercase : Optional[int] = scale
_lowercase : Optional[int] = pipe(**UpperCamelCase_ )[0]
_lowercase : Dict = self.get_dummy_inputs(UpperCamelCase_ )
_lowercase : Tuple = steps
_lowercase : Any = scale
_lowercase : List[Any] = pipe(**UpperCamelCase_ , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
_lowercase : int = self.get_dummy_inputs(UpperCamelCase_ )
_lowercase : Optional[int] = steps
_lowercase : Any = scale
_lowercase : Dict = pipe(**UpperCamelCase_ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
_lowercase : Optional[Any] = self.get_dummy_inputs(UpperCamelCase_ )
_lowercase : List[Any] = steps
_lowercase : List[Any] = scale
_lowercase : Optional[Any] = pipe(**UpperCamelCase_ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __UpperCAmelCase ( self : List[str] ) -> Dict:
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def __UpperCAmelCase ( self : Dict ) -> List[Any]:
'''simple docstring'''
_lowercase : int = self.get_dummy_components()
_lowercase : Any = self.pipeline_class(**UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(UpperCamelCase_ )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : str ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Union[str, Any] = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' )
_lowercase : List[str] = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , safety_checker=UpperCamelCase_ , controlnet=UpperCamelCase_ )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
_lowercase : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 )
_lowercase : Union[str, Any] = 'evil space-punk bird'
_lowercase : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((512, 512) )
_lowercase : str = load_image(
'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((512, 512) )
_lowercase : str = pipe(
UpperCamelCase_ , UpperCamelCase_ , control_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type='np' , num_inference_steps=50 , strength=0.6 , )
_lowercase : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
_lowercase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' )
assert np.abs(expected_image - image ).max() < 9E-2
| 701 |
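For orientation, a hedged usage sketch of the image-to-image ControlNet pipeline the slow test above exercises (in current diffusers the class is spelled StableDiffusionControlNetImg2ImgPipeline; the checkpoints and image URLs are the ones from the test):

import torch
from diffusers import ControlNetModel, StableDiffusionControlNetImg2ImgPipeline
from diffusers.utils import load_image

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None
)
pipe.enable_model_cpu_offload()  # keeps GPU memory low, as in the test

image = load_image(
    "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
).resize((512, 512))
control = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
).resize((512, 512))

out = pipe(
    "evil space-punk bird",
    image=image,
    control_image=control,
    num_inference_steps=50,
    strength=0.6,
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]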
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str]=13 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Any=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : int=99 , UpperCamelCase_ : Tuple=32 , UpperCamelCase_ : List[str]=5 , UpperCamelCase_ : Dict=4 , UpperCamelCase_ : Tuple=37 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : Union[str, Any]=4 , ) -> Tuple:
'''simple docstring'''
_lowercase : int = parent
_lowercase : str = batch_size
_lowercase : List[str] = seq_length
_lowercase : Dict = is_training
_lowercase : Optional[int] = use_attention_mask
_lowercase : List[Any] = use_token_type_ids
_lowercase : Union[str, Any] = use_labels
_lowercase : Dict = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Any = num_hidden_layers
_lowercase : int = num_attention_heads
_lowercase : Optional[int] = intermediate_size
_lowercase : Any = hidden_act
_lowercase : List[str] = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : Optional[int] = max_position_embeddings
_lowercase : int = type_vocab_size
_lowercase : Any = type_sequence_label_size
_lowercase : Any = initializer_range
_lowercase : str = num_choices
def __UpperCAmelCase ( self : str ) -> int:
'''simple docstring'''
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : int = None
if self.use_attention_mask:
_lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : Any = None
if self.use_token_type_ids:
_lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : str = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
_lowercase : Dict = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = config_and_inputs
_lowercase : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class lowerCamelCase__ ( A , unittest.TestCase ):
'''simple docstring'''
A_ = True
A_ = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __UpperCAmelCase ( self : str ) -> int:
'''simple docstring'''
_lowercase : Tuple = FlaxRoFormerModelTester(self )
@slow
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_lowercase : Optional[int] = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=UpperCamelCase_ )
_lowercase : str = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase_ )
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
'''simple docstring'''
_lowercase : Dict = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
_lowercase : Any = jnp.array([[0, 1, 2, 3, 4, 5]] )
_lowercase : int = model(UpperCamelCase_ )[0]
_lowercase : Union[str, Any] = 5_0000
_lowercase : str = (1, 6, vocab_size)
self.assertEqual(output.shape , UpperCamelCase_ )
_lowercase : int = jnp.array(
[[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
| 4 | 0 |
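RoFormer's distinguishing feature is the rotary position embedding; a minimal numpy sketch of the half-split rotation convention (one of the two common layouts; an illustration, not the model's exact code):

import numpy as np

def rotate_half(x):
    # (x1, x2) -> (-x2, x1) on the last dimension
    x1, x2 = np.split(x, 2, axis=-1)
    return np.concatenate([-x2, x1], axis=-1)

def apply_rotary(x, base=10000):
    # x: (seq_len, dim) with dim even; rotate each position by angle pos * freq
    seq_len, dim = x.shape
    inv_freq = 1.0 / base ** (np.arange(0, dim, 2) / dim)
    angles = np.outer(np.arange(seq_len), inv_freq)   # (seq_len, dim/2)
    emb = np.concatenate([angles, angles], axis=-1)   # (seq_len, dim)
    return x * np.cos(emb) + rotate_half(x) * np.sin(emb)

q = np.random.randn(6, 32)
print(apply_rotary(q).shape)  # (6, 32)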
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Optional[Any] =logging.get_logger(__name__)
_A : Optional[int] ={
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """gpt_bigcode"""
A_ = ["""past_key_values"""]
A_ = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : int , UpperCamelCase_ : List[str]=5_0257 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Union[str, Any]=768 , UpperCamelCase_ : Optional[int]=12 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : str=None , UpperCamelCase_ : int="gelu_pytorch_tanh" , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : List[Any]=1E-5 , UpperCamelCase_ : Dict=0.02 , UpperCamelCase_ : int=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : str=5_0256 , UpperCamelCase_ : Tuple=5_0256 , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : int=True , **UpperCamelCase_ : str , ) -> str:
'''simple docstring'''
_lowercase : List[str] = vocab_size
_lowercase : Optional[int] = n_positions
_lowercase : Any = n_embd
_lowercase : Union[str, Any] = n_layer
_lowercase : Optional[Any] = n_head
_lowercase : Dict = n_inner
_lowercase : Dict = activation_function
_lowercase : Union[str, Any] = resid_pdrop
_lowercase : Union[str, Any] = embd_pdrop
_lowercase : str = attn_pdrop
_lowercase : Optional[int] = layer_norm_epsilon
_lowercase : List[Any] = initializer_range
_lowercase : Any = scale_attn_weights
_lowercase : Any = use_cache
_lowercase : int = attention_softmax_in_fpaa
_lowercase : Optional[Any] = scale_attention_softmax_in_fpaa
_lowercase : str = multi_query
_lowercase : List[Any] = bos_token_id
_lowercase : Optional[int] = eos_token_id
super().__init__(bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
| 702 |
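The attribute_map above lets callers read hidden_size while the config stores n_embd; a simplified sketch of that aliasing (the real PretrainedConfig also redirects attribute writes):

class AliasedConfig:
    attribute_map = {"hidden_size": "n_embd", "num_hidden_layers": "n_layer"}

    def __init__(self, n_embd=768, n_layer=12):
        self.n_embd = n_embd
        self.n_layer = n_layer

    def __getattr__(self, name):
        # only called when normal lookup fails, so no recursion on real attributes
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)

config = AliasedConfig(n_embd=768)
assert config.hidden_size == 768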
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_A : Optional[int] =logging.get_logger(__name__)
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = ["""input_features""", """is_longer"""]
def __init__( self : List[Any] , UpperCamelCase_ : List[Any]=64 , UpperCamelCase_ : int=4_8000 , UpperCamelCase_ : Union[str, Any]=480 , UpperCamelCase_ : Any=10 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[int]=0.0 , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 1_4000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ) -> Dict:
'''simple docstring'''
super().__init__(
feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , )
_lowercase : Tuple = top_db
_lowercase : Any = truncation
_lowercase : str = padding
_lowercase : int = fft_window_size
_lowercase : Any = (fft_window_size >> 1) + 1
_lowercase : int = hop_length
_lowercase : Any = max_length_s
_lowercase : str = max_length_s * sampling_rate
_lowercase : Any = sampling_rate
_lowercase : List[Any] = frequency_min
_lowercase : Tuple = frequency_max
_lowercase : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale='htk' , )
_lowercase : Any = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm='slaney' , mel_scale='slaney' , )
def __UpperCAmelCase ( self : Tuple ) -> Dict[str, Any]:
'''simple docstring'''
_lowercase : Tuple = copy.deepcopy(self.__dict__ )
_lowercase : int = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None ) -> np.ndarray:
'''simple docstring'''
_lowercase : List[str] = spectrogram(
UpperCamelCase_ , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel='dB' , )
return log_mel_spectrogram.T
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_lowercase : Tuple = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
_lowercase : int = [0]
if len(ranges[2] ) == 0:
# likewise for the final third: fall back to the first chunk
_lowercase : Union[str, Any] = [0]
# randomly choose index for each part
_lowercase : Tuple = np.random.choice(ranges[0] )
_lowercase : int = np.random.choice(ranges[1] )
_lowercase : Any = np.random.choice(ranges[2] )
_lowercase : int = mel[idx_front : idx_front + chunk_frames, :]
_lowercase : int = mel[idx_middle : idx_middle + chunk_frames, :]
_lowercase : Tuple = mel[idx_back : idx_back + chunk_frames, :]
_lowercase : List[Any] = torch.tensor(mel[None, None, :] )
_lowercase : Optional[int] = torch.nn.functional.interpolate(
UpperCamelCase_ , size=[chunk_frames, 64] , mode='bilinear' , align_corners=UpperCamelCase_ )
_lowercase : str = mel_shrink[0][0].numpy()
_lowercase : int = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : np.array , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_lowercase : Tuple = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_lowercase : Any = len(UpperCamelCase_ ) - max_length
_lowercase : Dict = np.random.randint(0 , overflow + 1 )
_lowercase : Optional[int] = waveform[idx : idx + max_length]
_lowercase : Dict = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
_lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters )
_lowercase : List[Any] = max_length // self.hop_length + 1 # the +1 accounts for the extra frame the spectrogram computation produces
_lowercase : Optional[int] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_lowercase : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
_lowercase : List[Any] = False
else:
_lowercase : Union[str, Any] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
_lowercase : int = True
else:
raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
else:
_lowercase : Any = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
_lowercase : List[Any] = int(max_length / len(UpperCamelCase_ ) )
_lowercase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
_lowercase : Union[str, Any] = int(max_length / len(UpperCamelCase_ ) )
_lowercase : Union[str, Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_ ) )
_lowercase : Dict = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
_lowercase : str = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters )
_lowercase : Dict = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
_lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Dict , ) -> BatchFeature:
'''simple docstring'''
_lowercase : Dict = truncation if truncation is not None else self.truncation
_lowercase : int = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
_lowercase : Optional[Any] = isinstance(UpperCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_lowercase : List[str] = is_batched_numpy or (
isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_lowercase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray ):
_lowercase : Any = np.asarray(UpperCamelCase_ , dtype=np.floataa )
elif isinstance(UpperCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowercase : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowercase : int = [np.asarray(UpperCamelCase_ )]
# convert to mel spectrogram, truncate and pad if needed.
_lowercase : Optional[Any] = [
self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_ )
for waveform in raw_speech
]
_lowercase : List[Any] = []
_lowercase : Dict = []
for mel, longer in padded_inputs:
input_mel.append(UpperCamelCase_ )
is_longer.append(UpperCamelCase_ )
if truncation == "fusion" and sum(UpperCamelCase_ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_lowercase : Optional[Any] = np.random.randint(0 , len(UpperCamelCase_ ) )
_lowercase : str = True
if isinstance(input_mel[0] , UpperCamelCase_ ):
_lowercase : str = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
_lowercase : Tuple = [[longer] for longer in is_longer]
_lowercase : Optional[Any] = {'input_features': input_mel, 'is_longer': is_longer}
_lowercase : Optional[int] = BatchFeature(UpperCamelCase_ )
if return_tensors is not None:
_lowercase : List[Any] = input_features.convert_to_tensors(UpperCamelCase_ )
return input_features
| 4 | 0 |
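A standalone sketch of the "repeatpad" branch above: a clip shorter than max_length is tiled whole, then zero-padded to the target length (assumes a 1-D float waveform shorter than max_length):

import numpy as np

def repeatpad(waveform, max_length):
    n_repeat = max_length // len(waveform)        # whole repeats that fit
    tiled = np.tile(waveform, max(n_repeat, 1))
    return np.pad(tiled, (0, max_length - len(tiled)), mode="constant")

print(repeatpad(np.ones(3, dtype=np.float32), 8))  # [1. 1. 1. 1. 1. 1. 0. 0.]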
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
_A : int ='''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
_A : int =BASE_URL + '''/user'''
# https://github.com/settings/tokens
_A : List[Any] =os.environ.get('''USER_TOKEN''', '''''')
def __UpperCamelCase ( _lowercase ) -> dict[Any, Any]:
_lowercase : int = {
'Authorization': f'''token {auth_token}''',
'Accept': 'application/vnd.github.v3+json',
}
return requests.get(_lowercase, headers=_lowercase ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'''{key}: {value}''')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 703 |
'''simple docstring'''
from __future__ import annotations
import requests
def __UpperCamelCase ( _lowercase ) -> dict:
_lowercase : Optional[int] = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
return requests.get(_lowercase ).json()
def __UpperCamelCase ( _lowercase = 10 ) -> list[dict]:
_lowercase : Union[str, Any] = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
_lowercase : Optional[Any] = requests.get(_lowercase ).json()[:max_stories]
return [get_hackernews_story(_lowercase ) for story_id in story_ids]
def __UpperCamelCase ( _lowercase = 10 ) -> str:
_lowercase : Tuple = hackernews_top_stories(_lowercase )
return "\n".join('* [{title}]({url})'.format(**_lowercase ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 4 | 0 |
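Because every function in the dump above shares one mangled name, here is an equivalent standalone sketch of the Hacker News helpers (same public Firebase endpoints; network access required):

import requests

def top_stories_markdown(max_stories=3):
    ids = requests.get(
        "https://hacker-news.firebaseio.com/v0/topstories.json"
    ).json()[:max_stories]
    lines = []
    for story_id in ids:
        story = requests.get(
            f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json"
        ).json()
        # Ask HN posts have no "url" field, hence the .get fallback
        lines.append(f"* [{story.get('title')}]({story.get('url', '')})")
    return "\n".join(lines)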
'''simple docstring'''
import os
def __UpperCamelCase ( _lowercase = "input.txt" ) -> int:
with open(os.path.join(os.path.dirname(_lowercase ), _lowercase ) ) as input_file:
_lowercase : Optional[Any] = [
[int(_lowercase ) for element in line.split(',' )]
for line in input_file.readlines()
]
_lowercase : Any = len(_lowercase )
_lowercase : Dict = len(matrix[0] )
_lowercase : Tuple = [[-1 for _ in range(_lowercase )] for _ in range(_lowercase )]
for i in range(_lowercase ):
_lowercase : Union[str, Any] = matrix[i][0]
for j in range(1, _lowercase ):
for i in range(_lowercase ):
_lowercase : Tuple = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1, _lowercase ):
_lowercase : List[str] = min(
minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2, -1, -1 ):
_lowercase : Optional[Any] = min(
minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 704 |
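A tiny worked instance of the three-direction DP above (enter anywhere in the left column, move right/up/down, exit anywhere in the right column); the best path here is 1 -> 1 -> 1 across the middle row:

matrix = [
    [1, 9, 1],
    [1, 1, 1],
    [9, 9, 9],
]
best = [row[0] for row in matrix]                      # cost of entering each row
for j in range(1, 3):
    best = [best[i] + matrix[i][j] for i in range(3)]  # mandatory step right
    for i in range(1, 3):                              # relax moving down
        best[i] = min(best[i], best[i - 1] + matrix[i][j])
    for i in range(1, -1, -1):                         # relax moving up
        best[i] = min(best[i], best[i + 1] + matrix[i][j])
assert min(best) == 3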
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Dict =logging.get_logger(__name__)
_A : Dict ={
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """megatron-bert"""
def __init__( self : int , UpperCamelCase_ : int=2_9056 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[Any]=24 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=4096 , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : int=512 , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : Any=1E-12 , UpperCamelCase_ : Tuple=0 , UpperCamelCase_ : Optional[int]="absolute" , UpperCamelCase_ : Optional[Any]=True , **UpperCamelCase_ : Any , ) -> List[Any]:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Dict = vocab_size
_lowercase : Any = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Dict = num_attention_heads
_lowercase : Dict = hidden_act
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[int] = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : str = type_vocab_size
_lowercase : Optional[Any] = initializer_range
_lowercase : List[str] = layer_norm_eps
_lowercase : List[Any] = position_embedding_type
_lowercase : Optional[Any] = use_cache
| 4 | 0 |
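A hedged usage sketch for the config above: shrink the defaults (24 layers, hidden size 1024) to a toy size and instantiate a randomly initialised MegatronBertModel (assumes transformers with PyTorch installed):

from transformers import MegatronBertConfig, MegatronBertModel

config = MegatronBertConfig(
    hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=128
)
model = MegatronBertModel(config)
print(sum(p.numel() for p in model.parameters()))  # parameter count of the toy model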
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = 42
class lowerCamelCase__ ( A , A ):
'''simple docstring'''
@register_to_config
def __init__( self : Any , UpperCamelCase_ : int = 3 , UpperCamelCase_ : int = 3 , UpperCamelCase_ : Tuple[str] = ("DownEncoderBlock2D",) , UpperCamelCase_ : Tuple[str] = ("UpDecoderBlock2D",) , UpperCamelCase_ : Tuple[int] = (64,) , UpperCamelCase_ : int = 1 , UpperCamelCase_ : str = "silu" , UpperCamelCase_ : int = 3 , UpperCamelCase_ : int = 32 , UpperCamelCase_ : int = 256 , UpperCamelCase_ : int = 32 , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : float = 0.1_82_15 , UpperCamelCase_ : str = "group" , ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
# pass init params to Encoder
_lowercase : List[str] = Encoder(
in_channels=UpperCamelCase_ , out_channels=UpperCamelCase_ , down_block_types=UpperCamelCase_ , block_out_channels=UpperCamelCase_ , layers_per_block=UpperCamelCase_ , act_fn=UpperCamelCase_ , norm_num_groups=UpperCamelCase_ , double_z=UpperCamelCase_ , )
_lowercase : Any = vq_embed_dim if vq_embed_dim is not None else latent_channels
_lowercase : Tuple = nn.Convad(UpperCamelCase_ , UpperCamelCase_ , 1 )
_lowercase : List[str] = VectorQuantizer(UpperCamelCase_ , UpperCamelCase_ , beta=0.25 , remap=UpperCamelCase_ , sane_index_shape=UpperCamelCase_ )
_lowercase : Optional[Any] = nn.Convad(UpperCamelCase_ , UpperCamelCase_ , 1 )
# pass init params to Decoder
_lowercase : Tuple = Decoder(
in_channels=UpperCamelCase_ , out_channels=UpperCamelCase_ , up_block_types=UpperCamelCase_ , block_out_channels=UpperCamelCase_ , layers_per_block=UpperCamelCase_ , act_fn=UpperCamelCase_ , norm_num_groups=UpperCamelCase_ , norm_type=UpperCamelCase_ , )
@apply_forward_hook
def __UpperCAmelCase ( self : Any , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : bool = True ) -> VQEncoderOutput:
'''simple docstring'''
_lowercase : List[str] = self.encoder(UpperCamelCase_ )
_lowercase : Optional[int] = self.quant_conv(UpperCamelCase_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=UpperCamelCase_ )
@apply_forward_hook
def __UpperCAmelCase ( self : Any , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
if not force_not_quantize:
_lowercase : int = self.quantize(UpperCamelCase_ )
else:
_lowercase : Optional[Any] = h
_lowercase : Tuple = self.post_quant_conv(UpperCamelCase_ )
_lowercase : List[str] = self.decoder(UpperCamelCase_ , quant if self.config.norm_type == 'spatial' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
_lowercase : Any = sample
_lowercase : Optional[int] = self.encode(UpperCamelCase_ ).latents
_lowercase : List[Any] = self.decode(UpperCamelCase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCamelCase_ )
| 705 |
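The quantize step above snaps each encoder output to its nearest codebook vector; a minimal numpy sketch of that lookup (illustration only, without the straight-through gradient trick the real VectorQuantizer adds):

import numpy as np

def nearest_code(latents, codebook):
    # latents: (n, d), codebook: (k, d) -> quantised latents and chosen indices
    d2 = ((latents[:, None, :] - codebook[None, :, :]) ** 2).sum(axis=-1)  # (n, k)
    indices = d2.argmin(axis=1)
    return codebook[indices], indices

codes = np.array([[0.0, 0.0], [1.0, 1.0]])
z = np.array([[0.1, -0.2], [0.9, 1.2]])
quantised, idx = nearest_code(z, codes)
assert idx.tolist() == [0, 1]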
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def __UpperCamelCase ( _lowercase ) -> List[Any]:
_lowercase : Tuple = args.pruning_method
_lowercase : int = args.threshold
_lowercase : str = args.model_name_or_path.rstrip('/' )
_lowercase : Dict = args.target_model_path
print(f'''Load fine-pruned model from {model_name_or_path}''' )
_lowercase : str = torch.load(os.path.join(_lowercase, 'pytorch_model.bin' ) )
_lowercase : List[Any] = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
_lowercase : Optional[int] = tensor
print(f'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
_lowercase : List[str] = tensor
print(f'''Copied layer {name}''' )
elif "bias" in name:
_lowercase : Dict = tensor
print(f'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
_lowercase : Union[str, Any] = MagnitudeBinarizer.apply(inputs=_lowercase, threshold=_lowercase )
_lowercase : Optional[Any] = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
_lowercase : Optional[Any] = name[:-6]
_lowercase : Optional[Any] = model[f'''{prefix_}mask_scores''']
_lowercase : List[str] = TopKBinarizer.apply(_lowercase, _lowercase )
_lowercase : str = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
_lowercase : str = name[:-6]
_lowercase : Optional[Any] = model[f'''{prefix_}mask_scores''']
_lowercase : str = ThresholdBinarizer.apply(_lowercase, _lowercase, _lowercase )
_lowercase : Optional[int] = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
_lowercase : Optional[int] = name[:-6]
_lowercase : List[str] = model[f'''{prefix_}mask_scores''']
_lowercase , _lowercase : Union[str, Any] = -0.1, 1.1
_lowercase : str = torch.sigmoid(_lowercase )
_lowercase : int = s * (r - l) + l
_lowercase : Optional[Any] = s_bar.clamp(min=0.0, max=1.0 )
_lowercase : Union[str, Any] = tensor * mask
print(f'''Pruned layer {name}''' )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
_lowercase : List[Any] = os.path.join(
os.path.dirname(_lowercase ), f'''bertarized_{os.path.basename(_lowercase )}''' )
if not os.path.isdir(_lowercase ):
shutil.copytree(_lowercase, _lowercase )
print(f'''\nCreated folder {target_model_path}''' )
torch.save(_lowercase, os.path.join(_lowercase, 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
_A : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
_A : List[Any] =parser.parse_args()
main(args)
| 4 | 0 |
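A minimal sketch of the magnitude-pruning branch above: keep a fraction of weights by absolute value and zero the rest (here keep_ratio is interpreted as the fraction retained, matching the script's topK usage; ties may keep slightly more):

import torch

def magnitude_prune(tensor, keep_ratio):
    k = max(1, int(keep_ratio * tensor.numel()))
    # cutoff = k-th largest absolute value
    cutoff = tensor.abs().flatten().kthvalue(tensor.numel() - k + 1).values
    mask = (tensor.abs() >= cutoff).to(tensor.dtype)
    return tensor * mask

w = torch.tensor([0.05, -0.9, 0.3, -0.02])
print(magnitude_prune(w, 0.5))  # tensor([ 0.0000, -0.9000,  0.3000, -0.0000])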
'''simple docstring'''
from __future__ import annotations
def __UpperCamelCase ( _lowercase ) -> int:
if not nums:
return 0
_lowercase : Tuple = nums[0]
_lowercase : Optional[int] = 0
for num in nums[1:]:
_lowercase : int = (
max_excluding + num,
max(_lowercase, _lowercase ),
)
return max(_lowercase, _lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706 |
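Quick checks of the non-adjacent maximum-sum recurrence above, rewritten with readable names (the function name max_non_adjacent_sum is hypothetical; the logic is the same):

def max_non_adjacent_sum(nums):
    if not nums:
        return 0
    including, excluding = nums[0], 0
    for num in nums[1:]:
        including, excluding = excluding + num, max(including, excluding)
    return max(including, excluding)

assert max_non_adjacent_sum([2, 7, 9, 3, 1]) == 12          # 2 + 9 + 1
assert max_non_adjacent_sum([5, 5, 10, 100, 10, 5]) == 110  # 5 + 100 + 5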
'''simple docstring'''
_A : Optional[Any] ='''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def __UpperCamelCase ( _lowercase ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(_lowercase, _lowercase ):
_lowercase : Union[str, Any] = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(_lowercase )
_lowercase : int = ''.join(bin(_lowercase )[2:].zfill(8 ) for byte in data )
_lowercase : Dict = len(_lowercase ) % 6 != 0
if padding_needed:
# The padding that will be added later
_lowercase : Optional[Any] = B'=' * ((6 - len(_lowercase ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(_lowercase ) % 6)
else:
_lowercase : Optional[int] = B''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6], 2 )]
for index in range(0, len(_lowercase ), 6 ) ).encode()
+ padding
)
def __UpperCamelCase ( _lowercase ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(_lowercase, _lowercase ) and not isinstance(_lowercase, _lowercase ):
_lowercase : int = (
'argument should be a bytes-like object or ASCII string, '
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(_lowercase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_lowercase, _lowercase ):
try:
_lowercase : Optional[int] = encoded_data.decode('utf-8' )
except UnicodeDecodeError:
raise ValueError('base64 encoded data should only contain ASCII characters' )
_lowercase : Optional[int] = encoded_data.count('=' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_lowercase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
_lowercase : str = encoded_data[:-padding]
_lowercase : Tuple = ''.join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
_lowercase : Union[str, Any] = ''.join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )
_lowercase : List[str] = [
int(binary_stream[index : index + 8], 2 )
for index in range(0, len(_lowercase ), 8 )
]
return bytes(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 | 0 |
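A sanity check for the hand-rolled codec above against the standard library (the dump collapses both functions to one mangled name, so the round trip here uses the stdlib reference behaviour they reimplement):

import base64

data = b"Python"
encoded = base64.b64encode(data)
assert encoded == b"UHl0aG9u"          # same alphabet and '=' padding rules
assert base64.b64decode(encoded) == data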
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
_A : List[Any] ='''docs/source/en/_toctree.yml'''
def __UpperCamelCase ( _lowercase ) -> Optional[int]:
_lowercase : Union[str, Any] = defaultdict(_lowercase )
for doc in model_doc:
counts[doc["local"]] += 1
_lowercase : List[Any] = [key for key, value in counts.items() if value > 1]
_lowercase : str = []
for duplicate_key in duplicates:
_lowercase : Dict = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
if len(_lowercase ) > 1:
raise ValueError(
f'''{duplicate_key} is present several times in the documentation table of content at '''
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
# Sort
return sorted(_lowercase, key=lambda s : s["title"].lower() )
def __UpperCamelCase ( _lowercase=False ) -> str:
with open(_lowercase, encoding='utf-8' ) as f:
_lowercase : Optional[Any] = yaml.safe_load(f.read() )
# Get to the API doc
_lowercase : Tuple = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_lowercase : List[Any] = content[api_idx]['sections']
# Then to the model doc
_lowercase : Union[str, Any] = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
_lowercase : Optional[int] = api_doc[model_idx]['sections']
_lowercase : List[str] = [(idx, section) for idx, section in enumerate(_lowercase ) if 'sections' in section]
_lowercase : Any = False
for idx, modality_doc in modalities_docs:
_lowercase : Optional[Any] = modality_doc['sections']
_lowercase : Any = clean_model_doc_toc(_lowercase )
if old_modality_doc != new_modality_doc:
_lowercase : Any = True
if overwrite:
_lowercase : List[Any] = new_modality_doc
if diff:
if overwrite:
_lowercase : Any = model_doc
_lowercase : Optional[int] = api_doc
with open(_lowercase, 'w', encoding='utf-8' ) as f:
f.write(yaml.dump(_lowercase, allow_unicode=_lowercase ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
_A : Dict =argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
_A : Optional[int] =parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 707 |
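A small illustration of the duplicate-detection step above: count each "local" key, flag keys seen more than once, and keep a single entry per title:

from collections import Counter

model_doc = [
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/albert", "title": "ALBERT"},
    {"local": "model_doc/bert", "title": "BERT"},
]
counts = Counter(doc["local"] for doc in model_doc)
duplicates = [key for key, value in counts.items() if value > 1]
assert duplicates == ["model_doc/bert"]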
'''simple docstring'''
def __UpperCamelCase ( _lowercase ) -> bool:
return str(_lowercase ) == str(_lowercase )[::-1]
def __UpperCamelCase ( _lowercase ) -> int:
return int(_lowercase ) + int(str(_lowercase )[::-1] )
def __UpperCamelCase ( _lowercase = 1_0000 ) -> int:
_lowercase : List[str] = []
for num in range(1, _lowercase ):
_lowercase : Tuple = 0
_lowercase : Tuple = num
while iterations < 50:
_lowercase : Union[str, Any] = sum_reverse(_lowercase )
iterations += 1
if is_palindrome(_lowercase ):
break
else:
lychrel_nums.append(_lowercase )
return len(_lowercase )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 4 | 0 |
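A worked check of the reverse-and-add iteration above: 349 reaches a palindrome in three steps (349 + 943 = 1292, 1292 + 2921 = 4213, 4213 + 3124 = 7337), while 196 is the classic candidate that never resolves within 50 iterations:

n = 349
for _ in range(50):
    n += int(str(n)[::-1])
    if str(n) == str(n)[::-1]:
        break
assert n == 7337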
'''simple docstring'''
from __future__ import annotations
def __UpperCamelCase ( _lowercase ) -> bool:
if len(_lowercase ) < 2:
raise ValueError('Monogons and Digons are not polygons in the Euclidean space' )
if any(i <= 0 for i in nums ):
raise ValueError('All values must be greater than 0' )
_lowercase : int = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708 |
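Quick checks of the generalized polygon inequality above: after sorting, the longest side must be strictly less than the sum of the others:

for sides, expected in [([3, 4, 5], True), ([1, 2, 10], False)]:
    s = sorted(sides)
    assert (s[-1] < sum(s[:-1])) is expected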
'''simple docstring'''
import argparse
from collections import defaultdict
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) -> int:
_lowercase : Optional[int] = f'''{file}_{class_name}_{test_name}'''
done_test[_id] += 1
with open(_lowercase, 'r' ) as f:
_lowercase : Optional[int] = f.readlines()
_lowercase : Dict = f'''class {class_name}('''
_lowercase : List[Any] = f'''{4 * " "}def {test_name}('''
_lowercase : List[str] = f'''{8 * " "}{correct_line.split()[0]}'''
_lowercase : List[str] = f'''{16 * " "}{correct_line.split()[0]}'''
_lowercase : Dict = False
_lowercase : str = False
_lowercase : List[Any] = False
_lowercase : Union[str, Any] = False
_lowercase : Any = 0
_lowercase : Tuple = 0
_lowercase : Optional[int] = []
for line in lines:
if line.startswith(_lowercase ):
_lowercase : int = True
elif in_class and line.startswith(_lowercase ):
_lowercase : List[Any] = True
elif in_class and in_func and (line.startswith(_lowercase ) or line.startswith(_lowercase )):
_lowercase : str = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_lowercase : List[Any] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_lowercase : Any = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f'''{spaces * " "}{correct_line}''' )
_lowercase : Any = False
else:
new_lines.append(_lowercase )
with open(_lowercase, 'w' ) as f:
for line in new_lines:
f.write(_lowercase )
def __UpperCamelCase ( _lowercase, _lowercase=None ) -> Optional[Any]:
if fail is not None:
with open(_lowercase, 'r' ) as f:
_lowercase : Any = {l.strip() for l in f.readlines()}
else:
_lowercase : str = None
with open(_lowercase, 'r' ) as f:
_lowercase : str = f.readlines()
_lowercase : Union[str, Any] = defaultdict(_lowercase )
for line in correct_lines:
_lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = line.split(';' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(_lowercase, _lowercase, _lowercase, _lowercase, _lowercase )
if __name__ == "__main__":
_A : str =argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
_A : Union[str, Any] =parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 4 | 0 |
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
_A : str =tuple[int, int]
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self : List[Any] , UpperCamelCase_ : set[int] , UpperCamelCase_ : Mapping[EdgeT, int] ) -> None:
'''simple docstring'''
_lowercase : set[int] = vertices
_lowercase : dict[EdgeT, int] = {
(min(UpperCamelCase_ ), max(UpperCamelCase_ )): weight for edge, weight in edges.items()
}
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : EdgeT , UpperCamelCase_ : int ) -> None:
'''simple docstring'''
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
_lowercase : List[Any] = weight
def __UpperCAmelCase ( self : Any ) -> Graph:
'''simple docstring'''
_lowercase : Graph = Graph({min(self.vertices )} , {} )
_lowercase : EdgeT
_lowercase : int
_lowercase : EdgeT
_lowercase : int
while len(subgraph.vertices ) < len(self.vertices ):
_lowercase : Any = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
_lowercase : Optional[Any] = edge
_lowercase : List[Any] = weight
subgraph.add_edge(UpperCamelCase_ , UpperCamelCase_ )
return subgraph
def __UpperCamelCase ( _lowercase = "p107_network.txt" ) -> int:
_lowercase : str = os.path.abspath(os.path.dirname(_lowercase ) )
_lowercase : str = os.path.join(_lowercase, _lowercase )
_lowercase : dict[EdgeT, int] = {}
_lowercase : list[str]
_lowercase : int
_lowercase : int
with open(_lowercase ) as f:
_lowercase : List[str] = f.read().strip().split('\n' )
_lowercase : Optional[int] = [line.split(',' ) for line in data]
for edgea in range(1, len(_lowercase ) ):
for edgea in range(_lowercase ):
if adjaceny_matrix[edgea][edgea] != "-":
_lowercase : Dict = int(adjaceny_matrix[edgea][edgea] )
_lowercase : Graph = Graph(set(range(len(_lowercase ) ) ), _lowercase )
_lowercase : Graph = graph.prims_algorithm()
_lowercase : int = sum(graph.edges.values() )
_lowercase : int = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(F'''{solution() = }''')
| 709 |
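A miniature Prim run in the spirit of the solver above: grow the tree from one vertex, always adding the cheapest edge that crosses the cut; the Euler 107 answer is the total weight minus the MST weight:

edges = {(0, 1): 1, (1, 2): 2, (0, 2): 3}
vertices = {0, 1, 2}
in_tree, mst_weight = {0}, 0
while in_tree != vertices:
    edge, weight = min(
        ((e, w) for e, w in edges.items() if (e[0] in in_tree) ^ (e[1] in in_tree)),
        key=lambda item: item[1],
    )
    in_tree |= set(edge)
    mst_weight += weight
assert mst_weight == 3                        # MST uses edges (0,1) and (1,2)
assert sum(edges.values()) - mst_weight == 3  # the "saving" Euler 107 reports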
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_A : Optional[int] =logging.get_logger(__name__)
@add_end_docstrings(A )
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : Tuple , **UpperCamelCase_ : List[str] ) -> int:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : int , UpperCamelCase_ : Union[str, List[str], "Image", List["Image"]] , **UpperCamelCase_ : Tuple ) -> List[Any]:
'''simple docstring'''
return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] , **UpperCamelCase_ : str ) -> List[str]:
'''simple docstring'''
_lowercase : Optional[int] = {}
if "candidate_labels" in kwargs:
_lowercase : Union[str, Any] = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
_lowercase : int = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str="This is a photo of {}." ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Dict = load_image(UpperCamelCase_ )
_lowercase : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
_lowercase : Optional[Any] = candidate_labels
_lowercase : List[Any] = [hypothesis_template.format(UpperCamelCase_ ) for x in candidate_labels]
_lowercase : Union[str, Any] = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework , padding=UpperCamelCase_ )
_lowercase : Any = [text_inputs]
return inputs
def __UpperCAmelCase ( self : str , UpperCamelCase_ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = model_inputs.pop('candidate_labels' )
_lowercase : List[str] = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , UpperCamelCase_ ):
_lowercase : Optional[int] = text_inputs[0]
else:
# Batching case.
_lowercase : List[str] = text_inputs[0][0]
_lowercase : Optional[Any] = self.model(**UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Optional[Any] = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> List[str]:
'''simple docstring'''
_lowercase : Optional[int] = model_outputs.pop('candidate_labels' )
_lowercase : Optional[int] = model_outputs['logits'][0]
if self.framework == "pt":
_lowercase : List[Any] = logits.softmax(dim=-1 ).squeeze(-1 )
_lowercase : Tuple = probs.tolist()
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_lowercase : List[Any] = [scores]
elif self.framework == "tf":
_lowercase : Optional[int] = stable_softmax(UpperCamelCase_ , axis=-1 )
_lowercase : List[Any] = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
_lowercase : List[Any] = [
{'score': score, 'label': candidate_label}
for score, candidate_label in sorted(zip(UpperCamelCase_ , UpperCamelCase_ ) , key=lambda UpperCamelCase_ : -x[0] )
]
return result
| 4 | 0 |
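A hedged usage sketch of the pipeline implemented above (assumes the transformers pipeline factory, a CLIP checkpoint, and network access; the image URL is the COCO cats picture used throughout the transformers docs):

from transformers import pipeline

classifier = pipeline(
    "zero-shot-image-classification", model="openai/clip-vit-base-patch32"
)
preds = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["two cats", "a dog", "a plane"],
    hypothesis_template="This is a photo of {}.",
)
print(preds)  # [{"score": ..., "label": "two cats"}, ...] sorted by score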
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __UpperCamelCase ( _lowercase ) -> None:
_lowercase : List[Any] = analyze_text(_lowercase )
_lowercase : Any = list(' ' + ascii_lowercase )
# what is our total sum of probabilities.
_lowercase : Union[str, Any] = sum(single_char_strings.values() )
# one length string
_lowercase : Union[str, Any] = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
_lowercase : Any = single_char_strings[ch]
_lowercase : int = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
# two len string
_lowercase : str = sum(two_char_strings.values() )
_lowercase : str = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for cha in my_alphas:
_lowercase : Optional[Any] = cha + cha
if sequence in two_char_strings:
_lowercase : int = two_char_strings[sequence]
_lowercase : Optional[int] = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def __UpperCamelCase ( _lowercase ) -> tuple[dict, dict]:
_lowercase : Optional[Any] = Counter() # type: ignore
_lowercase : List[Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0, len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __UpperCamelCase ( ) -> List[Any]:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 710 |
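A worked instance of the first-order entropy computed above: for "aab", p(a) = 2/3 and p(b) = 1/3, so H = -(2/3)log2(2/3) - (1/3)log2(1/3) ≈ 0.918 bits:

import math
from collections import Counter

text = "aab"
counts = Counter(text)
total = sum(counts.values())
entropy = -sum((c / total) * math.log2(c / total) for c in counts.values())
assert abs(entropy - 0.918) < 1e-3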
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __UpperCamelCase ( _lowercase ) -> None:
_lowercase , _lowercase : List[Any] = analyze_text(_lowercase )
_lowercase : Any = list(' ' + ascii_lowercase )
# what is our total sum of probabilities.
_lowercase : Union[str, Any] = sum(single_char_strings.values() )
# one length string
_lowercase : Union[str, Any] = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
_lowercase : Any = single_char_strings[ch]
_lowercase : int = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
# two len string
_lowercase : str = sum(two_char_strings.values() )
_lowercase : str = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for cha in my_alphas:
_lowercase : Optional[Any] = cha + cha
if sequence in two_char_strings:
_lowercase : int = two_char_strings[sequence]
_lowercase : Optional[int] = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def __UpperCamelCase ( _lowercase ) -> tuple[dict, dict]:
_lowercase : Optional[Any] = Counter() # type: ignore
_lowercase : List[Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0, len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __UpperCamelCase ( ) -> List[Any]:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 4 | 0 |