code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
__snake_case : List[Any] =logging.get_logger(__name__)
__snake_case : Dict ={'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__snake_case : Optional[int] =[
'small',
'small-base',
'medium',
'medium-base',
'intermediate',
'intermediate-base',
'large',
'large-base',
'xlarge',
'xlarge-base',
]
__snake_case : Optional[Any] ={
'vocab_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json',
'funnel-transformer/small-base': (
'https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'
),
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json',
'funnel-transformer/large-base': (
'https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'
),
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'
),
},
}
__snake_case : Tuple ={f"""funnel-transformer/{name}""": 5_1_2 for name in _model_names}
__snake_case : List[str] ={f"""funnel-transformer/{name}""": {'do_lower_case': True} for name in _model_names}
class lowerCamelCase__ ( lowerCamelCase__):
'''simple docstring'''
snake_case_ =VOCAB_FILES_NAMES
snake_case_ =PRETRAINED_VOCAB_FILES_MAP
snake_case_ =PRETRAINED_INIT_CONFIGURATION
snake_case_ =FunnelTokenizer
snake_case_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ =2
def __init__(self ,__lowerCamelCase=None ,__lowerCamelCase=None ,__lowerCamelCase=True ,__lowerCamelCase="<unk>" ,__lowerCamelCase="<sep>" ,__lowerCamelCase="<pad>" ,__lowerCamelCase="<cls>" ,__lowerCamelCase="<mask>" ,__lowerCamelCase="<s>" ,__lowerCamelCase="</s>" ,__lowerCamelCase=True ,__lowerCamelCase=True ,__lowerCamelCase=None ,__lowerCamelCase="##" ,**__lowerCamelCase ,) -> int:
"""simple docstring"""
super().__init__(
__lowerCamelCase ,tokenizer_file=__lowerCamelCase ,do_lower_case=__lowerCamelCase ,unk_token=__lowerCamelCase ,sep_token=__lowerCamelCase ,pad_token=__lowerCamelCase ,cls_token=__lowerCamelCase ,mask_token=__lowerCamelCase ,bos_token=__lowerCamelCase ,eos_token=__lowerCamelCase ,clean_text=__lowerCamelCase ,tokenize_chinese_chars=__lowerCamelCase ,strip_accents=__lowerCamelCase ,wordpieces_prefix=__lowerCamelCase ,**__lowerCamelCase ,)
lowerCAmelCase__ : Union[str, Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' ,__lowerCamelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''' ,__lowerCamelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' ,__lowerCamelCase ) != tokenize_chinese_chars
):
lowerCAmelCase__ : Union[str, Any] = getattr(__lowerCamelCase ,normalizer_state.pop('''type''' ) )
lowerCAmelCase__ : Dict = do_lower_case
lowerCAmelCase__ : Union[str, Any] = strip_accents
lowerCAmelCase__ : Optional[int] = tokenize_chinese_chars
lowerCAmelCase__ : Any = normalizer_class(**__lowerCamelCase )
lowerCAmelCase__ : Any = do_lower_case
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase=None ) -> str:
"""simple docstring"""
lowerCAmelCase__ : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
lowerCAmelCase__ : str = [self.sep_token_id]
lowerCAmelCase__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase = None ) -> Tuple[str]:
"""simple docstring"""
lowerCAmelCase__ : List[str] = self._tokenizer.model.save(__lowerCamelCase ,name=__lowerCamelCase )
return tuple(__lowerCamelCase )
| 647 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCamelCase__ ( lowerCamelCase__ , unittest.TestCase):
'''simple docstring'''
snake_case_ =KandinskyVaaPipeline
snake_case_ =[
"""image_embeds""",
"""negative_image_embeds""",
]
snake_case_ =["""image_embeds""", """negative_image_embeds"""]
snake_case_ =[
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
snake_case_ =False
@property
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
return 32
@property
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
return 32
@property
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
return self.time_input_dim
@property
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
return 1_00
@property
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
lowerCAmelCase__ : int = UNetaDConditionModel(**__lowerCamelCase )
return model
@property
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase__ (self ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase__ : List[str] = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase__ (self ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = self.dummy_unet
lowerCAmelCase__ : int = self.dummy_movq
lowerCAmelCase__ : str = DDIMScheduler(
num_train_timesteps=10_00 ,beta_schedule='''linear''' ,beta_start=0.0_0085 ,beta_end=0.012 ,clip_sample=__lowerCamelCase ,set_alpha_to_one=__lowerCamelCase ,steps_offset=1 ,prediction_type='''epsilon''' ,thresholding=__lowerCamelCase ,)
lowerCAmelCase__ : Any = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase=0 ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
__lowerCamelCase )
if str(__lowerCamelCase ).startswith('''mps''' ):
lowerCAmelCase__ : Dict = torch.manual_seed(__lowerCamelCase )
else:
lowerCAmelCase__ : Optional[int] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Any = '''cpu'''
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : Optional[Any] = self.pipeline_class(**__lowerCamelCase )
lowerCAmelCase__ : str = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
lowerCAmelCase__ : int = pipe(**self.get_dummy_inputs(__lowerCamelCase ) )
lowerCAmelCase__ : Optional[Any] = output.images
lowerCAmelCase__ : List[Any] = pipe(
**self.get_dummy_inputs(__lowerCamelCase ) ,return_dict=__lowerCamelCase ,)[0]
lowerCAmelCase__ : str = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : Optional[int] = np.array(
[0.623_7976, 1.0, 0.3644_1332, 1.0, 0.7063_9634, 0.2987_7186, 0.8565_2125, 0.521_6843, 0.5445_4046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy''' )
lowerCAmelCase__ : Union[str, Any] = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' ,torch_dtype=torch.floataa )
pipe_prior.to(__lowerCamelCase )
lowerCAmelCase__ : List[Any] = KandinskyVaaPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' ,torch_dtype=torch.floataa )
lowerCAmelCase__ : List[Any] = pipeline.to(__lowerCamelCase )
pipeline.set_progress_bar_config(disable=__lowerCamelCase )
lowerCAmelCase__ : str = '''red cat, 4k photo'''
lowerCAmelCase__ : Dict = torch.Generator(device='''cuda''' ).manual_seed(0 )
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = pipe_prior(
__lowerCamelCase ,generator=__lowerCamelCase ,num_inference_steps=5 ,negative_prompt='''''' ,).to_tuple()
lowerCAmelCase__ : int = torch.Generator(device='''cuda''' ).manual_seed(0 )
lowerCAmelCase__ : Any = pipeline(
image_embeds=__lowerCamelCase ,negative_image_embeds=__lowerCamelCase ,generator=__lowerCamelCase ,num_inference_steps=1_00 ,output_type='''np''' ,)
lowerCAmelCase__ : List[str] = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(__lowerCamelCase ,__lowerCamelCase )
| 647 | 1 |
'''simple docstring'''
def __lowercase (_lowercase ) -> list:
"""simple docstring"""
# bit count represents no. of bits in the gray code
if bit_count < 0:
raise ValueError("""The given input must be positive""" )
# get the generated string sequence
__lowerCamelCase : Optional[Any] = gray_code_sequence_string(_lowercase )
#
# convert them to integers
for i in range(len(_lowercase ) ):
__lowerCamelCase : List[str] = int(sequence[i], 2 )
return sequence
def __lowercase (_lowercase ) -> list:
"""simple docstring"""
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
__lowerCamelCase : str = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
__lowerCamelCase : Union[str, Any] = gray_code_sequence_string(bit_count - 1 )
__lowerCamelCase : Optional[int] = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
__lowerCamelCase : Union[str, Any] = """0""" + smaller_sequence[i]
sequence.append(_lowercase )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
__lowerCamelCase : Optional[Any] = """1""" + smaller_sequence[i]
sequence.append(_lowercase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def __lowercase () -> str:
"""simple docstring"""
__lowerCamelCase : Any = HfArgumentParser(_lowercase )
__lowerCamelCase : List[str] = parser.parse_args_into_dataclasses()[0]
__lowerCamelCase : Dict = TensorFlowBenchmark(args=_lowercase )
try:
__lowerCamelCase : List[str] = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
__lowerCamelCase : Tuple = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
__lowerCamelCase : List[Any] = """ """.join(str(_lowercase ).split(""" """ )[:-1] )
__lowerCamelCase : Tuple = """"""
__lowerCamelCase : List[str] = eval(str(_lowercase ).split(""" """ )[-1] )
__lowerCamelCase : Tuple = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(_lowercase )
if len(_lowercase ) > 0:
__lowerCamelCase : Tuple = full_error_msg + begin_error_msg + str(_lowercase )
raise ValueError(_lowercase )
benchmark.run()
if __name__ == "__main__":
main()
| 483 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class __UpperCamelCase ( lowercase__ ):
def __init__( self :Union[str, Any] ):
snake_case_ : List[Any] = []
def a__ ( self :Any ,_UpperCamelCase :Dict ,_UpperCamelCase :str ,_UpperCamelCase :int ,**_UpperCamelCase :List[Any] ):
self.events.append("""on_init_end""" )
def a__ ( self :int ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :List[Any] ,**_UpperCamelCase :Optional[Any] ):
self.events.append("""on_train_begin""" )
def a__ ( self :Dict ,_UpperCamelCase :Dict ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :List[str] ,**_UpperCamelCase :int ):
self.events.append("""on_train_end""" )
def a__ ( self :str ,_UpperCamelCase :str ,_UpperCamelCase :Tuple ,_UpperCamelCase :Any ,**_UpperCamelCase :Optional[Any] ):
self.events.append("""on_epoch_begin""" )
def a__ ( self :Dict ,_UpperCamelCase :Tuple ,_UpperCamelCase :List[str] ,_UpperCamelCase :Union[str, Any] ,**_UpperCamelCase :Union[str, Any] ):
self.events.append("""on_epoch_end""" )
def a__ ( self :List[Any] ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :int ,_UpperCamelCase :List[Any] ,**_UpperCamelCase :Optional[Any] ):
self.events.append("""on_step_begin""" )
def a__ ( self :List[str] ,_UpperCamelCase :int ,_UpperCamelCase :Dict ,_UpperCamelCase :List[Any] ,**_UpperCamelCase :List[str] ):
self.events.append("""on_step_end""" )
def a__ ( self :List[str] ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :str ,**_UpperCamelCase :str ):
self.events.append("""on_evaluate""" )
def a__ ( self :str ,_UpperCamelCase :List[Any] ,_UpperCamelCase :int ,_UpperCamelCase :Dict ,**_UpperCamelCase :Optional[int] ):
self.events.append("""on_predict""" )
def a__ ( self :List[str] ,_UpperCamelCase :List[str] ,_UpperCamelCase :int ,_UpperCamelCase :Tuple ,**_UpperCamelCase :int ):
self.events.append("""on_save""" )
def a__ ( self :Tuple ,_UpperCamelCase :Any ,_UpperCamelCase :Dict ,_UpperCamelCase :Optional[int] ,**_UpperCamelCase :List[Any] ):
self.events.append("""on_log""" )
def a__ ( self :str ,_UpperCamelCase :Any ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :List[str] ,**_UpperCamelCase :Optional[Any] ):
self.events.append("""on_prediction_step""" )
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
def a__ ( self :List[Any] ):
snake_case_ : Union[str, Any] = tempfile.mkdtemp()
def a__ ( self :Dict ):
shutil.rmtree(self.output_dir )
def a__ ( self :Optional[Any] ,_UpperCamelCase :List[Any]=0 ,_UpperCamelCase :Tuple=0 ,_UpperCamelCase :Optional[int]=6_4 ,_UpperCamelCase :int=6_4 ,_UpperCamelCase :Optional[int]=None ,_UpperCamelCase :Optional[int]=False ,**_UpperCamelCase :int ):
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# its set to False since the tests later on depend on its value.
snake_case_ : List[str] = RegressionDataset(length=_UpperCamelCase )
snake_case_ : int = RegressionDataset(length=_UpperCamelCase )
snake_case_ : Optional[int] = RegressionModelConfig(a=_UpperCamelCase ,b=_UpperCamelCase )
snake_case_ : List[str] = RegressionPreTrainedModel(_UpperCamelCase )
snake_case_ : Tuple = TrainingArguments(self.output_dir ,disable_tqdm=_UpperCamelCase ,report_to=[] ,**_UpperCamelCase )
return Trainer(
_UpperCamelCase ,_UpperCamelCase ,train_dataset=_UpperCamelCase ,eval_dataset=_UpperCamelCase ,callbacks=_UpperCamelCase ,)
def a__ ( self :List[str] ,_UpperCamelCase :str ,_UpperCamelCase :Dict ):
self.assertEqual(len(_UpperCamelCase ) ,len(_UpperCamelCase ) )
# Order doesn't matter
snake_case_ : Union[str, Any] = sorted(_UpperCamelCase ,key=lambda _UpperCamelCase : cb.__name__ if isinstance(_UpperCamelCase ,_UpperCamelCase ) else cb.__class__.__name__ )
snake_case_ : Dict = sorted(_UpperCamelCase ,key=lambda _UpperCamelCase : cb.__name__ if isinstance(_UpperCamelCase ,_UpperCamelCase ) else cb.__class__.__name__ )
for cba, cba in zip(_UpperCamelCase ,_UpperCamelCase ):
if isinstance(_UpperCamelCase ,_UpperCamelCase ) and isinstance(_UpperCamelCase ,_UpperCamelCase ):
self.assertEqual(_UpperCamelCase ,_UpperCamelCase )
elif isinstance(_UpperCamelCase ,_UpperCamelCase ) and not isinstance(_UpperCamelCase ,_UpperCamelCase ):
self.assertEqual(_UpperCamelCase ,cba.__class__ )
elif not isinstance(_UpperCamelCase ,_UpperCamelCase ) and isinstance(_UpperCamelCase ,_UpperCamelCase ):
self.assertEqual(cba.__class__ ,_UpperCamelCase )
else:
self.assertEqual(_UpperCamelCase ,_UpperCamelCase )
def a__ ( self :Dict ,_UpperCamelCase :List[str] ):
snake_case_ : int = ["""on_init_end""", """on_train_begin"""]
snake_case_ : int = 0
snake_case_ : Tuple = len(trainer.get_eval_dataloader() )
snake_case_ : List[str] = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader() ) + ["""on_log""", """on_evaluate"""]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("""on_epoch_begin""" )
for _ in range(_UpperCamelCase ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("""on_log""" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("""on_save""" )
expected_events.append("""on_epoch_end""" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def a__ ( self :Tuple ):
snake_case_ : Union[str, Any] = self.get_trainer()
snake_case_ : Any = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks ,_UpperCamelCase )
# Callbacks passed at init are added to the default callbacks
snake_case_ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(_UpperCamelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,_UpperCamelCase )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
snake_case_ : Optional[Any] = self.get_trainer(disable_tqdm=_UpperCamelCase )
snake_case_ : Union[str, Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks ,_UpperCamelCase )
def a__ ( self :Optional[int] ):
snake_case_ : Any = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
snake_case_ : Optional[int] = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(_UpperCamelCase )
expected_callbacks.remove(_UpperCamelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,_UpperCamelCase )
snake_case_ : Any = self.get_trainer()
snake_case_ : List[Any] = trainer.pop_callback(_UpperCamelCase )
self.assertEqual(cb.__class__ ,_UpperCamelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,_UpperCamelCase )
trainer.add_callback(_UpperCamelCase )
expected_callbacks.insert(0 ,_UpperCamelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,_UpperCamelCase )
# We can also add, pop, or remove by instance
snake_case_ : Optional[Any] = self.get_trainer()
snake_case_ : Any = trainer.callback_handler.callbacks[0]
trainer.remove_callback(_UpperCamelCase )
expected_callbacks.remove(_UpperCamelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,_UpperCamelCase )
snake_case_ : Tuple = self.get_trainer()
snake_case_ : Union[str, Any] = trainer.callback_handler.callbacks[0]
snake_case_ : str = trainer.pop_callback(_UpperCamelCase )
self.assertEqual(_UpperCamelCase ,_UpperCamelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,_UpperCamelCase )
trainer.add_callback(_UpperCamelCase )
expected_callbacks.insert(0 ,_UpperCamelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,_UpperCamelCase )
def a__ ( self :List[str] ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="""ignore""" ,category=_UpperCamelCase )
snake_case_ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
snake_case_ : Optional[int] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_UpperCamelCase ,self.get_expected_events(_UpperCamelCase ) )
# Independent log/save/eval
snake_case_ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] ,logging_steps=5 )
trainer.train()
snake_case_ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_UpperCamelCase ,self.get_expected_events(_UpperCamelCase ) )
snake_case_ : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] ,save_steps=5 )
trainer.train()
snake_case_ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_UpperCamelCase ,self.get_expected_events(_UpperCamelCase ) )
snake_case_ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] ,eval_steps=5 ,evaluation_strategy="""steps""" )
trainer.train()
snake_case_ : List[str] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_UpperCamelCase ,self.get_expected_events(_UpperCamelCase ) )
snake_case_ : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] ,evaluation_strategy="""epoch""" )
trainer.train()
snake_case_ : List[Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_UpperCamelCase ,self.get_expected_events(_UpperCamelCase ) )
# A bit of everything
snake_case_ : Optional[Any] = self.get_trainer(
callbacks=[MyTestTrainerCallback] ,logging_steps=3 ,save_steps=1_0 ,eval_steps=5 ,evaluation_strategy="""steps""" ,)
trainer.train()
snake_case_ : Optional[Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_UpperCamelCase ,self.get_expected_events(_UpperCamelCase ) )
# warning should be emitted for duplicated callbacks
with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock:
snake_case_ : Any = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] ,)
assert str(_UpperCamelCase ) in warn_mock.call_args[0][0] | 334 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__A : str = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Dict=None , lowerCamelCase_ :str=None , lowerCamelCase_ :Tuple=None ):
'''simple docstring'''
snake_case_ : Tuple = True
while ask_again:
snake_case_ : Any = input(lowerCamelCase_ )
try:
if default is not None and len(lowerCamelCase_ ) == 0:
return default
return convert_value(lowerCamelCase_ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(lowerCamelCase_ )
def UpperCAmelCase ( lowerCamelCase_ :Tuple , lowerCamelCase_ :List[Any]=[] , lowerCamelCase_ :Dict=None , lowerCamelCase_ :Union[str, Any]=0 ):
'''simple docstring'''
snake_case_ : List[str] = BulletMenu(lowerCamelCase_ , lowerCamelCase_ )
snake_case_ : int = menu.run(default_choice=lowerCamelCase_ )
return convert_value(lowerCamelCase_ ) if convert_value is not None else result
def UpperCAmelCase ( lowerCamelCase_ :Any ):
'''simple docstring'''
snake_case_ : str = int(lowerCamelCase_ )
return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] )
def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] ):
'''simple docstring'''
snake_case_ : Dict = int(lowerCamelCase_ )
return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] )
def UpperCAmelCase ( lowerCamelCase_ :Optional[int] ):
'''simple docstring'''
snake_case_ : Optional[Any] = int(lowerCamelCase_ )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def UpperCAmelCase ( lowerCamelCase_ :int ):
'''simple docstring'''
snake_case_ : List[Any] = int(lowerCamelCase_ )
return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] )
def UpperCAmelCase ( lowerCamelCase_ :Any ):
'''simple docstring'''
snake_case_ : str = int(lowerCamelCase_ )
return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] )
def UpperCAmelCase ( lowerCamelCase_ :Dict ):
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class __UpperCamelCase ( argparse.RawDescriptionHelpFormatter ):
def a__ ( self :Tuple ,_UpperCamelCase :Any ,_UpperCamelCase :Any ,_UpperCamelCase :int ,_UpperCamelCase :Any ):
snake_case_ : List[Any] = super()._format_usage(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
snake_case_ : List[Any] = usage.replace("""<command> [<args>] """ ,"""""" )
return usage | 334 | 1 |
def _lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(SCREAMING_SNAKE_CASE ) )
def _lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if index == len(SCREAMING_SNAKE_CASE ):
return True
# Recursive Step
for i in range(SCREAMING_SNAKE_CASE ):
if valid_coloring(graph[index] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
# Color current vertex
A_ = i
# Validate coloring
if util_color(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , index + 1 ):
return True
# Backtrack
A_ = -1
return False
def _lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A_ = [-1] * len(SCREAMING_SNAKE_CASE )
if util_color(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 0 ):
return colored_vertices
return []
| 712 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__lowercase = logging.get_logger(__name__)
__lowercase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
__lowercase = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def _lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
for attribute in key.split('''.''' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
A_ = '''lm_head'''
A_ = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if weight_type is not None:
A_ = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape
else:
A_ = hf_pointer.shape
assert hf_shape == value.shape, (
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}"
)
if weight_type == "weight":
A_ = value
elif weight_type == "weight_g":
A_ = value
elif weight_type == "weight_v":
A_ = value
elif weight_type == "bias":
A_ = value
else:
A_ = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A_ = []
A_ = fairseq_model.state_dict()
A_ = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
A_ = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == '''group''' , )
A_ = True
else:
for key, mapped_key in MAPPING.items():
A_ = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
A_ = True
if "*" in mapped_key:
A_ = name.split(SCREAMING_SNAKE_CASE )[0].split('''.''' )[-2]
A_ = mapped_key.replace('''*''' , SCREAMING_SNAKE_CASE )
if "weight_g" in name:
A_ = '''weight_g'''
elif "weight_v" in name:
A_ = '''weight_v'''
elif "bias" in name:
A_ = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A_ = '''weight'''
else:
A_ = None
set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE )
logger.warning(f"Unused weights: {unused_weights}" )
def _lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A_ = full_name.split('''conv_layers.''' )[-1]
A_ = name.split('''.''' )
A_ = int(items[0] )
A_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
A_ = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
A_ = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
A_ = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
A_ = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
@torch.no_grad()
def _lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True ):
'''simple docstring'''
if config_path is not None:
A_ = UniSpeechConfig.from_pretrained(SCREAMING_SNAKE_CASE )
else:
A_ = UniSpeechConfig()
if is_finetuned:
if dict_path:
A_ = Dictionary.load_from_json(SCREAMING_SNAKE_CASE )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
A_ = target_dict.pad_index
A_ = target_dict.bos_index
A_ = target_dict.eos_index
A_ = len(target_dict.symbols )
A_ = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' )
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE ) )
return
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
A_ = target_dict.indices
# fairseq has the <pad> and <s> switched
A_ = 42
A_ = 43
with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A_ = WavaVecaPhonemeCTCTokenizer(
SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=SCREAMING_SNAKE_CASE , )
A_ = True if config.feat_extract_norm == '''layer''' else False
A_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , )
A_ = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
A_ = UniSpeechForCTC(SCREAMING_SNAKE_CASE )
else:
A_ = UniSpeechForPreTraining(SCREAMING_SNAKE_CASE )
if is_finetuned:
A_ ,A_ ,A_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
else:
A_ ,A_ ,A_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
A_ = model[0].eval()
recursively_load_weights(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
hf_unispeech.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__lowercase = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 563 | 0 |
_lowercase = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
_lowercase = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> list[int]:
'''simple docstring'''
lowerCamelCase__: Optional[int] = True
lowerCamelCase__: Union[str, Any] = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
order.append(_UpperCamelCase )
return order
def __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> list[int]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] = True
lowerCamelCase__: Optional[int] = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return component
def __lowerCAmelCase ( _UpperCamelCase ) -> list[list[int]]:
'''simple docstring'''
lowerCamelCase__: List[Any] = len(_UpperCamelCase ) * [False]
lowerCamelCase__: dict[int, list[int]] = {vert: [] for vert in range(len(_UpperCamelCase ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(_UpperCamelCase )
lowerCamelCase__: Tuple = []
for i, was_visited in enumerate(_UpperCamelCase ):
if not was_visited:
order += topology_sort(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowerCamelCase__: Tuple = []
lowerCamelCase__: Optional[Any] = len(_UpperCamelCase ) * [False]
for i in range(len(_UpperCamelCase ) ):
lowerCamelCase__: Optional[Any] = order[len(_UpperCamelCase ) - i - 1]
if not visited[vert]:
lowerCamelCase__: Optional[Any] = find_components(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
components_list.append(_UpperCamelCase )
return components_list
| 306 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None ) -> Any:
'''simple docstring'''
if attention_mask is None:
lowerCamelCase__: List[str] = tf.cast(tf.math.not_equal(_UpperCamelCase , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class lowerCamelCase__ :
__lowerCamelCase = OPTConfig
__lowerCamelCase = {}
__lowerCamelCase = """gelu"""
def __init__( self : Union[str, Any] , __a : List[Any] , __a : Dict=13 , __a : Dict=7 , __a : Optional[Any]=True , __a : Any=False , __a : Tuple=99 , __a : Optional[int]=16 , __a : Any=2 , __a : Optional[Any]=4 , __a : Union[str, Any]=4 , __a : Tuple="gelu" , __a : Optional[int]=0.1 , __a : int=0.1 , __a : List[Any]=20 , __a : Tuple=2 , __a : str=1 , __a : str=0 , __a : List[Any]=16 , __a : Optional[Any]=16 , ):
'''simple docstring'''
lowerCamelCase__: List[str] = parent
lowerCamelCase__: List[str] = batch_size
lowerCamelCase__: Dict = seq_length
lowerCamelCase__: List[str] = is_training
lowerCamelCase__: Dict = use_labels
lowerCamelCase__: Union[str, Any] = vocab_size
lowerCamelCase__: Union[str, Any] = hidden_size
lowerCamelCase__: Any = num_hidden_layers
lowerCamelCase__: Union[str, Any] = num_attention_heads
lowerCamelCase__: Tuple = intermediate_size
lowerCamelCase__: Optional[int] = hidden_act
lowerCamelCase__: Union[str, Any] = hidden_dropout_prob
lowerCamelCase__: str = attention_probs_dropout_prob
lowerCamelCase__: List[str] = max_position_embeddings
lowerCamelCase__: Tuple = eos_token_id
lowerCamelCase__: Any = pad_token_id
lowerCamelCase__: str = bos_token_id
lowerCamelCase__: Optional[int] = embed_dim
lowerCamelCase__: Union[str, Any] = word_embed_proj_dim
lowerCamelCase__: List[Any] = False
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__: List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCamelCase__: Tuple = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCamelCase__: Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCamelCase__: Union[str, Any] = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=__a , **self.config_updates , )
lowerCamelCase__: Optional[Any] = prepare_opt_inputs_dict(__a , __a )
return config, inputs_dict
def lowerCamelCase_ ( self : str , __a : Optional[Any] , __a : Optional[int] ):
'''simple docstring'''
lowerCamelCase__: Optional[Any] = TFOPTModel(config=__a )
lowerCamelCase__: Optional[Any] = inputs_dict["""input_ids"""]
lowerCamelCase__: Dict = input_ids[:1, :]
lowerCamelCase__: Any = inputs_dict["""attention_mask"""][:1, :]
lowerCamelCase__: Any = 1
# first forward pass
lowerCamelCase__: str = model(__a , attention_mask=__a , use_cache=__a )
lowerCamelCase__ , lowerCamelCase__: Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCamelCase__: Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase__: Dict = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
lowerCamelCase__: str = tf.concat([input_ids, next_tokens] , axis=-1 )
lowerCamelCase__: int = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowerCamelCase__: Any = model(__a , attention_mask=__a )[0]
lowerCamelCase__: Any = model(__a , attention_mask=__a , past_key_values=__a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowerCamelCase__: str = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowerCamelCase__: Optional[int] = output_from_no_past[:, -3:, random_slice_idx]
lowerCamelCase__: List[str] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__a , __a , rtol=1e-3 )
@require_tf
class lowerCamelCase__ ( A__ , A__ , unittest.TestCase ):
__lowerCamelCase = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
__lowerCamelCase = (TFOPTForCausalLM,) if is_tf_available() else ()
__lowerCamelCase = (
{"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
)
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = 10
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowerCamelCase__: int = TFOPTModelTester(self )
lowerCamelCase__: Tuple = ConfigTester(self , config_class=__a )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowerCamelCase__: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__a )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: str = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(__a : Optional[int] , __a : Dict ):
if hasattr(__a , """weight""" ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(__a , """weight""" ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
lowerCamelCase__: int = model_class(config=__a )
lowerCamelCase__: Tuple = _get_word_embedding_weight(__a , model.get_input_embeddings() )
lowerCamelCase__: Optional[int] = _get_word_embedding_weight(__a , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(__a )
lowerCamelCase__: str = _get_word_embedding_weight(__a , model.get_input_embeddings() )
lowerCamelCase__: List[str] = _get_word_embedding_weight(__a , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
lowerCamelCase__: Any = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , __a )
# check that weights remain the same after resizing
lowerCamelCase__: Optional[Any] = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCamelCase__: Any = False
self.assertTrue(__a )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , __a )
lowerCamelCase__: List[Any] = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCamelCase__: List[Any] = False
self.assertTrue(__a )
def __lowerCAmelCase ( _UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
return tf.constant(_UpperCamelCase , dtype=tf.intaa )
@require_tf
class lowerCamelCase__ ( unittest.TestCase ):
__lowerCamelCase = 99
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowerCamelCase__: int = tf.ones((4, 1) , dtype=tf.intaa ) * 2
lowerCamelCase__: Optional[int] = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
lowerCamelCase__: Any = input_ids.shape[0]
lowerCamelCase__: List[str] = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class lowerCamelCase__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__: int = TFOPTModel.from_pretrained("""facebook/opt-350m""" )
lowerCamelCase__: List[Any] = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
lowerCamelCase__: Optional[Any] = tf.not_equal(__a , model.config.pad_token_id )
with tf.GradientTape():
lowerCamelCase__: str = model(input_ids=__a , attention_mask=__a ).last_hidden_state
lowerCamelCase__: str = (1, 11, 512)
self.assertEqual(output.shape , __a )
lowerCamelCase__: str = tf.constant(
[[-0.2_873, -1.9_218, -0.3_033], [-1.2_710, -0.1_338, -0.1_902], [0.4_095, 0.1_214, -1.3_121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , __a , atol=4e-3 ) )
lowerCamelCase__: Optional[int] = tf.function(__a , jit_compile=__a )
lowerCamelCase__: List[Any] = xla_generate(__a , __a )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , __a , atol=4e-2 ) )
@require_tf
@slow
class lowerCamelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
super().setUp()
lowerCamelCase__: List[Any] = """facebook/opt-350m"""
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowerCamelCase__: Dict = TFOPTForCausalLM.from_pretrained(self.path_model )
lowerCamelCase__: Dict = GPTaTokenizer.from_pretrained(self.path_model )
lowerCamelCase__: Union[str, Any] = [
"""Today is a beautiful day and I want to""",
"""In the city of""",
"""Paris is the capital of France and""",
"""Computers and mobile phones have taken""",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
lowerCamelCase__: Union[str, Any] = tokenizer(__a , return_tensors="""tf""" , padding=__a , add_special_tokens=__a )
lowerCamelCase__: Union[str, Any] = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
lowerCamelCase__: Dict = tf.constant(
[
[1.3_851, -13.8_923, -10.5_229, -10.7_533, -0.2_309, -10.2_384, -0.5_365, -9.0_947, -5.1_670],
[-4.7_073, -10.6_276, -3.9_415, -21.5_242, -0.2_822, -0.2_822, -0.2_822, -0.2_822, -0.2_822],
[0.6_247, -3.4_229, -8.9_179, -1.4_297, -14.1_650, 1.4_146, -9.0_218, -0.2_703, -0.2_703],
[6.4_783, -1.9_913, -10.7_926, -2.3_336, 1.5_092, -0.9_974, -6.8_213, 1.3_477, 1.3_477],
] )
self.assertTrue(np.allclose(__a , __a , atol=1e-4 ) )
lowerCamelCase__: Any = tf.function(__a , jit_compile=__a )
lowerCamelCase__: List[Any] = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(__a , __a , atol=1e-4 ) )
@require_tf
@slow
class lowerCamelCase__ ( unittest.TestCase ):
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowerCamelCase__: Union[str, Any] = """facebook/opt-125m"""
lowerCamelCase__: Dict = [
"""Today is a beautiful day and I want to""",
"""In the city of New York, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
lowerCamelCase__: Any = []
lowerCamelCase__: Optional[Any] = GPTaTokenizer.from_pretrained(__a )
lowerCamelCase__: str = TFOPTForCausalLM.from_pretrained(__a )
for prompt in self.prompts:
lowerCamelCase__: Dict = tokenizer(__a , return_tensors="""tf""" ).input_ids
lowerCamelCase__: Any = model.generate(__a , max_length=10 )
lowerCamelCase__: Optional[int] = tokenizer.batch_decode(__a , skip_special_tokens=__a )
predicted_outputs += generated_string
self.assertListEqual(__a , __a )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__: List[Any] = """facebook/opt-350m"""
lowerCamelCase__: Tuple = GPTaTokenizer.from_pretrained(__a )
lowerCamelCase__: Any = TFOPTForCausalLM.from_pretrained(__a )
lowerCamelCase__: Tuple = """left"""
# use different length sentences to test batching
lowerCamelCase__: Tuple = [
"""Hello, my dog is a little""",
"""Today, I""",
]
lowerCamelCase__: List[Any] = tokenizer(__a , return_tensors="""tf""" , padding=__a )
lowerCamelCase__: Any = inputs["""input_ids"""]
lowerCamelCase__: int = model.generate(input_ids=__a , attention_mask=inputs["""attention_mask"""] )
lowerCamelCase__: Optional[int] = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
lowerCamelCase__: Optional[Any] = model.generate(input_ids=__a )
lowerCamelCase__: int = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["""attention_mask"""][-1] , tf.intaa ) )
lowerCamelCase__: Dict = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
lowerCamelCase__: str = model.generate(input_ids=__a , max_length=model.config.max_length - num_paddings )
lowerCamelCase__: List[str] = tokenizer.batch_decode(__a , skip_special_tokens=__a )
lowerCamelCase__: Optional[int] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__a )
lowerCamelCase__: Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=__a )
lowerCamelCase__: Tuple = [
"""Hello, my dog is a little bit of a dork.\nI'm a little bit""",
"""Today, I was in the middle of a conversation with a friend about the""",
]
self.assertListEqual(__a , __a )
self.assertListEqual(__a , [non_padded_sentence, padded_sentence] )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__: Dict = """facebook/opt-350m"""
lowerCamelCase__: Tuple = [
"""Today is a beautiful day and I want to""",
"""In the city of San Francisco, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
lowerCamelCase__: Dict = []
lowerCamelCase__: int = GPTaTokenizer.from_pretrained(__a )
lowerCamelCase__: List[Any] = TFOPTForCausalLM.from_pretrained(__a )
for prompt in self.prompts:
lowerCamelCase__: str = tokenizer(__a , return_tensors="""tf""" ).input_ids
lowerCamelCase__: Optional[int] = model.generate(__a , max_length=10 )
lowerCamelCase__: Any = tokenizer.batch_decode(__a , skip_special_tokens=__a )
predicted_outputs += generated_string
self.assertListEqual(__a , __a )
| 306 | 1 |
from __future__ import annotations
from fractions import Fraction
def _snake_case ( __snake_case , __snake_case ) -> bool:
'''simple docstring'''
return (
num != den and num % 1_0 == den // 1_0 and (num // 1_0) / (den % 1_0) == num / den
)
def _snake_case ( __snake_case ) -> list[str]:
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = []
UpperCAmelCase_ : List[str] = 1_1
UpperCAmelCase_ : Union[str, Any] = int("1" + "0" * digit_len )
for num in range(a_ , a_ ):
while den <= 9_9:
if (num != den) and (num % 1_0 == den // 1_0) and (den % 1_0 != 0):
if is_digit_cancelling(a_ , a_ ):
solutions.append(F"""{num}/{den}""" )
den += 1
num += 1
UpperCAmelCase_ : Optional[Any] = 1_0
return solutions
def _snake_case ( __snake_case = 2 ) -> int:
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = 1.0
for fraction in fraction_list(a_ ):
UpperCAmelCase_ : List[str] = Fraction(a_ )
result *= frac.denominator / frac.numerator
return int(a_ )
if __name__ == "__main__":
print(solution())
| 718 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
__lowerCamelCase = False
__lowerCamelCase = False
def _snake_case ( __snake_case ) -> Any:
'''simple docstring'''
return TrainCommand(__snake_case )
class snake_case_ (lowercase__ ):
"""simple docstring"""
@staticmethod
def A_ ( lowercase):
"""simple docstring"""
UpperCAmelCase_ : str = parser.add_parser("train" ,help="CLI tool to train a model on a task.")
train_parser.add_argument(
"--train_data" ,type=lowercase ,required=lowercase ,help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences." ,)
train_parser.add_argument(
"--column_label" ,type=lowercase ,default=0 ,help="Column of the dataset csv file with example labels.")
train_parser.add_argument(
"--column_text" ,type=lowercase ,default=1 ,help="Column of the dataset csv file with example texts.")
train_parser.add_argument(
"--column_id" ,type=lowercase ,default=2 ,help="Column of the dataset csv file with example ids.")
train_parser.add_argument(
"--skip_first_row" ,action="store_true" ,help="Skip the first row of the csv file (headers).")
train_parser.add_argument("--validation_data" ,type=lowercase ,default="" ,help="path to validation dataset.")
train_parser.add_argument(
"--validation_split" ,type=lowercase ,default=0.1 ,help="if validation dataset is not provided, fraction of train dataset to use as validation dataset." ,)
train_parser.add_argument("--output" ,type=lowercase ,default="./" ,help="path to saved the trained model.")
train_parser.add_argument(
"--task" ,type=lowercase ,default="text_classification" ,help="Task to train the model on.")
train_parser.add_argument(
"--model" ,type=lowercase ,default="bert-base-uncased" ,help="Model's name or path to stored model.")
train_parser.add_argument("--train_batch_size" ,type=lowercase ,default=32 ,help="Batch size for training.")
train_parser.add_argument("--valid_batch_size" ,type=lowercase ,default=64 ,help="Batch size for validation.")
train_parser.add_argument("--learning_rate" ,type=lowercase ,default=3E-5 ,help="Learning rate.")
train_parser.add_argument("--adam_epsilon" ,type=lowercase ,default=1E-08 ,help="Epsilon for Adam optimizer.")
train_parser.set_defaults(func=lowercase)
def __init__( self ,lowercase):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = logging.get_logger("transformers-cli/training")
UpperCAmelCase_ : List[Any] = "tf" if is_tf_available() else "torch"
os.makedirs(args.output ,exist_ok=lowercase)
UpperCAmelCase_ : Union[str, Any] = args.output
UpperCAmelCase_ : Optional[int] = args.column_label
UpperCAmelCase_ : Dict = args.column_text
UpperCAmelCase_ : Optional[Any] = args.column_id
self.logger.info(F"""Loading {args.task} pipeline for {args.model}""")
if args.task == "text_classification":
UpperCAmelCase_ : List[Any] = TextClassificationPipeline.from_pretrained(args.model)
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F"""Loading dataset from {args.train_data}""")
UpperCAmelCase_ : str = Processor.create_from_csv(
args.train_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,)
UpperCAmelCase_ : str = None
if args.validation_data:
self.logger.info(F"""Loading validation dataset from {args.validation_data}""")
UpperCAmelCase_ : Union[str, Any] = Processor.create_from_csv(
args.validation_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,)
UpperCAmelCase_ : List[str] = args.validation_split
UpperCAmelCase_ : Tuple = args.train_batch_size
UpperCAmelCase_ : Optional[int] = args.valid_batch_size
UpperCAmelCase_ : Dict = args.learning_rate
UpperCAmelCase_ : List[str] = args.adam_epsilon
def A_ ( self):
"""simple docstring"""
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def A_ ( self):
"""simple docstring"""
raise NotImplementedError
def A_ ( self):
"""simple docstring"""
self.pipeline.fit(
self.train_dataset ,validation_data=self.valid_dataset ,validation_split=self.validation_split ,learning_rate=self.learning_rate ,adam_epsilon=self.adam_epsilon ,train_batch_size=self.train_batch_size ,valid_batch_size=self.valid_batch_size ,)
# Save trained pipeline
self.pipeline.save_pretrained(self.output)
| 455 | 0 |
'''simple docstring'''
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , _lowercase : int) -> None:
A_ = size
A_ = [0] * size
A_ = [0] * size
@staticmethod
def __snake_case ( _lowercase : int) -> int:
return index | (index + 1)
@staticmethod
def __snake_case ( _lowercase : int) -> int:
return (index & (index + 1)) - 1
def __snake_case ( self : int , _lowercase : int , _lowercase : int) -> None:
A_ = value
while index < self.size:
A_ = self.get_prev(_lowercase) + 1
if current_left_border == index:
A_ = value
else:
A_ = max(_lowercase , _lowercase , _lowercase)
A_ = self.get_next(_lowercase)
def __snake_case ( self : Tuple , _lowercase : int , _lowercase : int) -> int:
right -= 1 # Because of right is exclusive
A_ = 0
while left <= right:
A_ = self.get_prev(_lowercase)
if left <= current_left:
A_ = max(_lowercase , self.tree[right])
A_ = current_left
else:
A_ = max(_lowercase , self.arr[right])
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 366 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class __UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : int , **_lowercase : List[Any]) -> Union[str, Any]:
super().__init__(**_lowercase)
requires_backends(self , 'vision')
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
def __call__( self : Optional[Any] , _lowercase : Union[str, List[str], "Image", List["Image"]] , **_lowercase : List[Any]) -> Any:
return super().__call__(_lowercase , **_lowercase)
def __snake_case ( self : int , **_lowercase : Union[str, Any]) -> Any:
A_ = {}
if "candidate_labels" in kwargs:
A_ = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
A_ = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def __snake_case ( self : List[str] , _lowercase : Tuple , _lowercase : Any=None , _lowercase : Optional[int]="This is a photo of {}.") -> Union[str, Any]:
A_ = load_image(_lowercase)
A_ = self.image_processor(images=[image] , return_tensors=self.framework)
A_ = candidate_labels
A_ = [hypothesis_template.format(_lowercase) for x in candidate_labels]
A_ = self.tokenizer(_lowercase , return_tensors=self.framework , padding=_lowercase)
A_ = [text_inputs]
return inputs
def __snake_case ( self : Optional[int] , _lowercase : Tuple) -> Optional[int]:
A_ = model_inputs.pop('candidate_labels')
A_ = model_inputs.pop('text_inputs')
if isinstance(text_inputs[0] , _lowercase):
A_ = text_inputs[0]
else:
# Batching case.
A_ = text_inputs[0][0]
A_ = self.model(**_lowercase , **_lowercase)
A_ = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def __snake_case ( self : List[str] , _lowercase : int) -> Optional[int]:
A_ = model_outputs.pop('candidate_labels')
A_ = model_outputs['logits'][0]
if self.framework == "pt":
A_ = logits.softmax(dim=-1).squeeze(-1)
A_ = probs.tolist()
if not isinstance(_lowercase , _lowercase):
A_ = [scores]
elif self.framework == "tf":
A_ = stable_softmax(_lowercase , axis=-1)
A_ = probs.numpy().tolist()
else:
raise ValueError(F'Unsupported framework: {self.framework}')
A_ = [
{'score': score, 'label': candidate_label}
for score, candidate_label in sorted(zip(_lowercase , _lowercase) , key=lambda _lowercase: -x[0])
]
return result
| 366 | 1 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class __UpperCamelCase ( unittest.TestCase ):
def __a ( self ) -> str:
a : Dict = inspect.getfile(accelerate.test_utils )
a : str = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "external_deps", "test_metrics.py"] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
a : str = test_metrics
@require_cpu
def __a ( self ) -> Tuple:
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def __a ( self ) -> Union[str, Any]:
debug_launcher(self.test_metrics.main )
@require_single_gpu
def __a ( self ) -> int:
self.test_metrics.main()
@require_multi_gpu
def __a ( self ) -> int:
print(f"""Found {torch.cuda.device_count()} devices.""" )
a : Any = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCAmelCase__ , env=os.environ.copy() )
| 31 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : int = 10 , _lowercase : int = 1000 , _lowercase : bool = True ) ->int:
'''simple docstring'''
assert (
isinstance(_lowercase , _lowercase )
and isinstance(_lowercase , _lowercase )
and isinstance(_lowercase , _lowercase )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("Invalid value for min_val or max_val (min_value < max_value)" )
return min_val if option else max_val
def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : int ) ->int:
'''simple docstring'''
return int((number_a + number_a) / 2 )
def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : int , _lowercase : int ) ->None:
'''simple docstring'''
assert (
isinstance(_lowercase , _lowercase ) and isinstance(_lowercase , _lowercase ) and isinstance(_lowercase , _lowercase )
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError("argument value for lower and higher must be(lower > higher)" )
if not lower < to_guess < higher:
raise ValueError(
"guess value must be within the range of lower and higher value" )
def answer(_lowercase : int ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("started..." )
a : Optional[Any] = lower
a : List[Any] = higher
a : Tuple = []
while True:
a : List[Any] = get_avg(_lowercase , _lowercase )
last_numbers.append(_lowercase )
if answer(_lowercase ) == "low":
a : Optional[int] = number
elif answer(_lowercase ) == "high":
a : Tuple = number
else:
break
print(F"""guess the number : {last_numbers[-1]}""" )
print(F"""details : {last_numbers!s}""" )
def _SCREAMING_SNAKE_CASE ( ) ->None:
'''simple docstring'''
a : Tuple = int(input("Enter lower value : " ).strip() )
a : Dict = int(input("Enter high value : " ).strip() )
a : Optional[int] = int(input("Enter value to guess : " ).strip() )
guess_the_number(_lowercase , _lowercase , _lowercase )
if __name__ == "__main__":
main()
| 31 | 1 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def A__ ( snake_case_ : Any ):
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: List[str]= image.size
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: int= (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
SCREAMING_SNAKE_CASE__: Tuple= image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
SCREAMING_SNAKE_CASE__: List[Any]= np.array(snake_case_ ).astype(np.floataa ) / 2_55.0
SCREAMING_SNAKE_CASE__: Optional[int]= image[None].transpose(0 , 3 , 1 , 2 )
SCREAMING_SNAKE_CASE__: Dict= torch.from_numpy(snake_case_ )
return 2.0 * image - 1.0
class _lowerCamelCase ( UpperCamelCase_ ):
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) -> List[Any]:
super().__init__()
self.register_modules(vqvae=lowerCAmelCase , unet=lowerCAmelCase , scheduler=lowerCAmelCase )
@torch.no_grad()
def __call__( self , lowerCAmelCase = None , lowerCAmelCase = 1 , lowerCAmelCase = 100 , lowerCAmelCase = 0.0 , lowerCAmelCase = None , lowerCAmelCase = "pil" , lowerCAmelCase = True , ) -> Union[Tuple, ImagePipelineOutput]:
if isinstance(lowerCAmelCase , PIL.Image.Image ):
SCREAMING_SNAKE_CASE__: str= 1
elif isinstance(lowerCAmelCase , torch.Tensor ):
SCREAMING_SNAKE_CASE__: int= image.shape[0]
else:
raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(lowerCAmelCase )}' )
if isinstance(lowerCAmelCase , PIL.Image.Image ):
SCREAMING_SNAKE_CASE__: Union[str, Any]= preprocess(lowerCAmelCase )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Tuple= image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
SCREAMING_SNAKE_CASE__: Union[str, Any]= (batch_size, self.unet.config.in_channels // 2, height, width)
SCREAMING_SNAKE_CASE__: Optional[Any]= next(self.unet.parameters() ).dtype
SCREAMING_SNAKE_CASE__: Optional[int]= randn_tensor(lowerCAmelCase , generator=lowerCAmelCase , device=self.device , dtype=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[Any]= image.to(device=self.device , dtype=lowerCAmelCase )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(lowerCAmelCase , device=self.device )
SCREAMING_SNAKE_CASE__: Any= self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
SCREAMING_SNAKE_CASE__: List[Any]= latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
SCREAMING_SNAKE_CASE__: Dict= '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
SCREAMING_SNAKE_CASE__: str= {}
if accepts_eta:
SCREAMING_SNAKE_CASE__: List[Any]= eta
for t in self.progress_bar(lowerCAmelCase ):
# concat latents and low resolution image in the channel dimension.
SCREAMING_SNAKE_CASE__: Optional[Any]= torch.cat([latents, image] , dim=1 )
SCREAMING_SNAKE_CASE__: Optional[Any]= self.scheduler.scale_model_input(lowerCAmelCase , lowerCAmelCase )
# predict the noise residual
SCREAMING_SNAKE_CASE__: Dict= self.unet(lowerCAmelCase , lowerCAmelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE__: Any= self.scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
# decode the image latents with the VQVAE
SCREAMING_SNAKE_CASE__: Tuple= self.vqvae.decode(lowerCAmelCase ).sample
SCREAMING_SNAKE_CASE__: Optional[int]= torch.clamp(lowerCAmelCase , -1.0 , 1.0 )
SCREAMING_SNAKE_CASE__: Optional[Any]= image / 2 + 0.5
SCREAMING_SNAKE_CASE__: List[Any]= image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE__: Tuple= self.numpy_to_pil(lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase )
| 64 |
'''simple docstring'''
def a ( _UpperCAmelCase ) -> int:
"""simple docstring"""
assert column_title.isupper()
a_ = 0
a_ = len(_UpperCAmelCase ) - 1
a_ = 0
while index >= 0:
a_ = (ord(column_title[index] ) - 6_4) * pow(2_6 , _UpperCAmelCase )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 697 | 0 |
"""simple docstring"""
def _lowerCAmelCase ( lowerCamelCase__ : int ) -> int:
_SCREAMING_SNAKE_CASE : int = 1
for i in range(1, num + 1 ):
fact *= i
return fact
def _lowerCAmelCase ( lowerCamelCase__ : int ) -> int:
_SCREAMING_SNAKE_CASE : int = 0
while number > 0:
_SCREAMING_SNAKE_CASE : List[str] = number % 1_0
sum_of_digits += last_digit
_SCREAMING_SNAKE_CASE : List[str] = number // 1_0 # Removing the last_digit from the given number
return sum_of_digits
def _lowerCAmelCase ( lowerCamelCase__ : int = 1_0_0 ) -> int:
_SCREAMING_SNAKE_CASE : Union[str, Any] = factorial(lowerCamelCase__ )
_SCREAMING_SNAKE_CASE : Tuple = split_and_add(lowerCamelCase__ )
return result
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 705 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"files", [
["full:README.md", "dataset_infos.json"],
["empty:README.md", "dataset_infos.json"],
["dataset_infos.json"],
["full:README.md"],
], )
def _lowerCAmelCase ( lowerCamelCase__ : str, lowerCamelCase__ : str ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Optional[int] = tmp_path_factory.mktemp("dset_infos_dir" )
if "full:README.md" in files:
with open(dataset_infos_dir / "README.md", "w" ) as f:
f.write("---\ndataset_info:\n dataset_size: 42\n---" )
if "empty:README.md" in files:
with open(dataset_infos_dir / "README.md", "w" ) as f:
f.write("" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / "dataset_infos.json", "w" ) as f:
f.write("{\"default\": {\"dataset_size\": 42}}" )
_SCREAMING_SNAKE_CASE : Optional[Any] = DatasetInfosDict.from_directory(lowerCamelCase__ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 4_2
@pytest.mark.parametrize(
"dataset_info", [
DatasetInfo(),
DatasetInfo(
description="foo", features=Features({"a": Value("int32" )} ), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=4_2, ),
], )
def _lowerCAmelCase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : DatasetInfo ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Dict = str(lowerCamelCase__ )
dataset_info.write_to_directory(lowerCamelCase__ )
_SCREAMING_SNAKE_CASE : int = DatasetInfo.from_directory(lowerCamelCase__ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(lowerCamelCase__, "dataset_info.json" ) )
def _lowerCAmelCase ( ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : str = DatasetInfo(
description="foo", citation="bar", homepage="https://foo.bar", license="CC0", features=Features({"a": Value("int32" )} ), post_processed={}, supervised_keys=(), task_templates=[], builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train", "num_examples": 4_2}], download_checksums={}, download_size=1_3_3_7, post_processing_size=4_4_2, dataset_size=1_2_3_4, size_in_bytes=1_3_3_7 + 4_4_2 + 1_2_3_4, )
_SCREAMING_SNAKE_CASE : List[str] = dataset_info._to_yaml_dict()
assert sorted(lowerCamelCase__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str) )
_SCREAMING_SNAKE_CASE : Optional[Any] = yaml.safe_dump(lowerCamelCase__ )
_SCREAMING_SNAKE_CASE : List[Any] = yaml.safe_load(lowerCamelCase__ )
assert dataset_info_yaml_dict == reloaded
def _lowerCAmelCase ( ) -> Tuple:
_SCREAMING_SNAKE_CASE : Dict = DatasetInfo()
_SCREAMING_SNAKE_CASE : List[Any] = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"dataset_infos_dict", [
DatasetInfosDict(),
DatasetInfosDict({"default": DatasetInfo()} ),
DatasetInfosDict({"my_config_name": DatasetInfo()} ),
DatasetInfosDict(
{
"default": DatasetInfo(
description="foo", features=Features({"a": Value("int32" )} ), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=4_2, )
} ),
DatasetInfosDict(
{
"v1": DatasetInfo(dataset_size=4_2 ),
"v2": DatasetInfo(dataset_size=1_3_3_7 ),
} ),
], )
def _lowerCAmelCase ( lowerCamelCase__ : Any, lowerCamelCase__ : DatasetInfosDict ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : str = str(lowerCamelCase__ )
dataset_infos_dict.write_to_directory(lowerCamelCase__ )
_SCREAMING_SNAKE_CASE : str = DatasetInfosDict.from_directory(lowerCamelCase__ )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
_SCREAMING_SNAKE_CASE : List[Any] = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
_SCREAMING_SNAKE_CASE : Dict = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(lowerCamelCase__, "README.md" ) )
| 295 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
lowerCAmelCase_ : List[Any] = 'pytorch_model.bin'
lowerCAmelCase_ : str = 'pytorch_model.bin.index.json'
lowerCAmelCase_ : Union[str, Any] = 'adapter_config.json'
lowerCAmelCase_ : Optional[Any] = 'adapter_model.bin'
lowerCAmelCase_ : Dict = 'adapter_model.safetensors'
lowerCAmelCase_ : List[str] = 'tf_model.h5'
lowerCAmelCase_ : Dict = 'tf_model.h5.index.json'
lowerCAmelCase_ : Optional[Any] = 'model.ckpt'
lowerCAmelCase_ : str = 'flax_model.msgpack'
lowerCAmelCase_ : int = 'flax_model.msgpack.index.json'
lowerCAmelCase_ : Tuple = 'model.safetensors'
lowerCAmelCase_ : Tuple = 'model.safetensors.index.json'
lowerCAmelCase_ : str = 'config.json'
lowerCAmelCase_ : List[str] = 'preprocessor_config.json'
lowerCAmelCase_ : Union[str, Any] = FEATURE_EXTRACTOR_NAME
lowerCAmelCase_ : Optional[Any] = 'generation_config.json'
lowerCAmelCase_ : Any = 'modelcard.json'
lowerCAmelCase_ : Union[str, Any] = '▁'
lowerCAmelCase_ : Tuple = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
lowerCAmelCase_ : Dict = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
lowerCAmelCase_ : str = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
lowerCAmelCase_ : str = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def _SCREAMING_SNAKE_CASE ( UpperCamelCase__ : str ):
"""simple docstring"""
if version.parse(UpperCamelCase__ ) < version.parse(UpperCamelCase__ ):
if "dev" in min_version:
a_ : Tuple = (
"""This example requires a source install from HuggingFace Transformers (see """
"""`https://huggingface.co/docs/transformers/installation#install-from-source`),"""
)
else:
a_ : Dict = F"This example requires a minimum version of {min_version},"
error_message += F" but the version found is {__version__}.\n"
raise ImportError(
error_message
+ """Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other """
"""versions of HuggingFace Transformers.""" )
| 442 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase_ : Dict = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : int = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 442 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class _lowercase :
"""simple docstring"""
lowercase__ = None
lowercase__ = None
lowercase__ = None # sigma(t_i)
@classmethod
def UpperCAmelCase_ ( cls : Tuple ) -> List[str]:
'''simple docstring'''
return cls()
@dataclass
class _lowercase ( __A ):
"""simple docstring"""
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
class _lowercase ( __A , __A ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self : Dict ) -> Dict:
'''simple docstring'''
return True
@register_to_config
def __init__( self : Optional[int] , UpperCamelCase__ : Optional[Any] = 0.02 , UpperCamelCase__ : Optional[Any] = 100 , UpperCamelCase__ : Dict = 1.0_07 , UpperCamelCase__ : Optional[Any] = 80 , UpperCamelCase__ : Union[str, Any] = 0.05 , UpperCamelCase__ : Tuple = 50 , ) -> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
return KarrasVeSchedulerState.create()
def UpperCAmelCase_ ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : int = () ) -> str:
'''simple docstring'''
__UpperCamelCase =jnp.arange(0 , UpperCamelCase__ )[::-1].copy()
__UpperCamelCase =[
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=UpperCamelCase__ , schedule=jnp.array(UpperCamelCase__ , dtype=jnp.floataa ) , timesteps=UpperCamelCase__ , )
def UpperCAmelCase_ ( self : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , ) -> List[str]:
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
__UpperCamelCase =min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
__UpperCamelCase =0
# sample eps ~ N(0, S_noise^2 * I)
__UpperCamelCase =random.split(UpperCamelCase__ , num=1 )
__UpperCamelCase =self.config.s_noise * random.normal(key=UpperCamelCase__ , shape=sample.shape )
__UpperCamelCase =sigma + gamma * sigma
__UpperCamelCase =sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def UpperCAmelCase_ ( self : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any = True , ) -> Tuple:
'''simple docstring'''
__UpperCamelCase =sample_hat + sigma_hat * model_output
__UpperCamelCase =(sample_hat - pred_original_sample) / sigma_hat
__UpperCamelCase =sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=UpperCamelCase__ , derivative=UpperCamelCase__ , state=UpperCamelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] = True , ) -> Tuple:
'''simple docstring'''
__UpperCamelCase =sample_prev + sigma_prev * model_output
__UpperCamelCase =(sample_prev - pred_original_sample) / sigma_prev
__UpperCamelCase =sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=UpperCamelCase__ , derivative=UpperCamelCase__ , state=UpperCamelCase__ )
def UpperCAmelCase_ ( self : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
raise NotImplementedError()
| 704 | """simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase = logging.get_logger(__name__)
def lowerCAmelCase (__UpperCamelCase : Any ):
"""simple docstring"""
__UpperCamelCase =MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
__UpperCamelCase =[1_4_4, 1_9_2, 2_4_0]
__UpperCamelCase =[1_6, 3_2, 6_4, 9_6, 1_2_8, 1_6_0, 6_4_0]
elif "mobilevit_xs" in mobilevit_name:
__UpperCamelCase =[9_6, 1_2_0, 1_4_4]
__UpperCamelCase =[1_6, 3_2, 4_8, 6_4, 8_0, 9_6, 3_8_4]
elif "mobilevit_xxs" in mobilevit_name:
__UpperCamelCase =[6_4, 8_0, 9_6]
__UpperCamelCase =[1_6, 1_6, 2_4, 4_8, 6_4, 8_0, 3_2_0]
__UpperCamelCase =0.0_5
__UpperCamelCase =2.0
if mobilevit_name.startswith('''deeplabv3_''' ):
__UpperCamelCase =5_1_2
__UpperCamelCase =1_6
__UpperCamelCase =2_1
__UpperCamelCase ='''pascal-voc-id2label.json'''
else:
__UpperCamelCase =1_0_0_0
__UpperCamelCase ='''imagenet-1k-id2label.json'''
__UpperCamelCase ='''huggingface/label-files'''
__UpperCamelCase =json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) )
__UpperCamelCase ={int(__UpperCamelCase ): v for k, v in idalabel.items()}
__UpperCamelCase =idalabel
__UpperCamelCase ={v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase (__UpperCamelCase : Any , __UpperCamelCase : List[str]=False ):
"""simple docstring"""
for i in range(1 , 6 ):
if F"""layer_{i}.""" in name:
__UpperCamelCase =name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
__UpperCamelCase =name.replace('''conv_1.''' , '''conv_stem.''' )
if ".block." in name:
__UpperCamelCase =name.replace('''.block.''' , '''.''' )
if "exp_1x1" in name:
__UpperCamelCase =name.replace('''exp_1x1''' , '''expand_1x1''' )
if "red_1x1" in name:
__UpperCamelCase =name.replace('''red_1x1''' , '''reduce_1x1''' )
if ".local_rep.conv_3x3." in name:
__UpperCamelCase =name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' )
if ".local_rep.conv_1x1." in name:
__UpperCamelCase =name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' )
if ".norm." in name:
__UpperCamelCase =name.replace('''.norm.''' , '''.normalization.''' )
if ".conv." in name:
__UpperCamelCase =name.replace('''.conv.''' , '''.convolution.''' )
if ".conv_proj." in name:
__UpperCamelCase =name.replace('''.conv_proj.''' , '''.conv_projection.''' )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
__UpperCamelCase =name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
__UpperCamelCase =name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
if "expand_1x1" in name:
__UpperCamelCase =name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' )
if "conv_3x3" in name:
__UpperCamelCase =name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' )
if "reduce_1x1" in name:
__UpperCamelCase =name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' )
for i in range(2 , 5 ):
if F""".global_rep.{i}.weight""" in name:
__UpperCamelCase =name.replace(F""".global_rep.{i}.weight""" , '''.layernorm.weight''' )
if F""".global_rep.{i}.bias""" in name:
__UpperCamelCase =name.replace(F""".global_rep.{i}.bias""" , '''.layernorm.bias''' )
if ".global_rep." in name:
__UpperCamelCase =name.replace('''.global_rep.''' , '''.transformer.''' )
if ".pre_norm_mha.0." in name:
__UpperCamelCase =name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' )
if ".pre_norm_mha.1.out_proj." in name:
__UpperCamelCase =name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' )
if ".pre_norm_ffn.0." in name:
__UpperCamelCase =name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' )
if ".pre_norm_ffn.1." in name:
__UpperCamelCase =name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' )
if ".pre_norm_ffn.4." in name:
__UpperCamelCase =name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' )
if ".transformer." in name:
__UpperCamelCase =name.replace('''.transformer.''' , '''.transformer.layer.''' )
if ".aspp_layer." in name:
__UpperCamelCase =name.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in name:
__UpperCamelCase =name.replace('''.aspp_pool.''' , '''.''' )
if "seg_head." in name:
__UpperCamelCase =name.replace('''seg_head.''' , '''segmentation_head.''' )
if "segmentation_head.classifier.classifier." in name:
__UpperCamelCase =name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' )
if "classifier.fc." in name:
__UpperCamelCase =name.replace('''classifier.fc.''' , '''classifier.''' )
elif (not base_model) and ("segmentation_head." not in name):
__UpperCamelCase ='''mobilevit.''' + name
return name
def lowerCAmelCase (__UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any]=False ):
"""simple docstring"""
if base_model:
__UpperCamelCase =''''''
else:
__UpperCamelCase ='''mobilevit.'''
for key in orig_state_dict.copy().keys():
__UpperCamelCase =orig_state_dict.pop(__UpperCamelCase )
if key[:8] == "encoder.":
__UpperCamelCase =key[8:]
if "qkv" in key:
__UpperCamelCase =key.split('''.''' )
__UpperCamelCase =int(key_split[0][6:] ) - 1
__UpperCamelCase =int(key_split[3] )
__UpperCamelCase =model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
__UpperCamelCase =layer.transformer.layer[transformer_num].attention.attention.all_head_size
__UpperCamelCase =(
F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
)
if "weight" in key:
__UpperCamelCase =val[:dim, :]
__UpperCamelCase =val[dim : dim * 2, :]
__UpperCamelCase =val[-dim:, :]
else:
__UpperCamelCase =val[:dim]
__UpperCamelCase =val[dim : dim * 2]
__UpperCamelCase =val[-dim:]
else:
__UpperCamelCase =val
return orig_state_dict
def lowerCAmelCase ():
"""simple docstring"""
__UpperCamelCase ='''http://images.cocodataset.org/val2017/000000039769.jpg'''
__UpperCamelCase =Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def lowerCAmelCase (__UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : str=False ):
"""simple docstring"""
__UpperCamelCase =get_mobilevit_config(__UpperCamelCase )
# load original state_dict
__UpperCamelCase =torch.load(__UpperCamelCase , map_location='''cpu''' )
# load 🤗 model
if mobilevit_name.startswith('''deeplabv3_''' ):
__UpperCamelCase =MobileViTForSemanticSegmentation(__UpperCamelCase ).eval()
else:
__UpperCamelCase =MobileViTForImageClassification(__UpperCamelCase ).eval()
__UpperCamelCase =convert_state_dict(__UpperCamelCase , __UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
__UpperCamelCase =MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 3_2 )
__UpperCamelCase =image_processor(images=prepare_img() , return_tensors='''pt''' )
__UpperCamelCase =model(**__UpperCamelCase )
__UpperCamelCase =outputs.logits
if mobilevit_name.startswith('''deeplabv3_''' ):
assert logits.shape == (1, 2_1, 3_2, 3_2)
if mobilevit_name == "deeplabv3_mobilevit_s":
__UpperCamelCase =torch.tensor(
[
[[6.2_0_6_5, 6.1_2_9_2, 6.2_0_7_0], [6.1_0_7_9, 6.1_2_5_4, 6.1_7_4_7], [6.0_0_4_2, 6.1_0_7_1, 6.1_0_3_4]],
[[-6.9_2_5_3, -6.8_6_5_3, -7.0_3_9_8], [-7.3_2_1_8, -7.3_9_8_3, -7.3_6_7_0], [-7.1_9_6_1, -7.2_4_8_2, -7.1_5_6_9]],
[[-4.4_7_2_3, -4.4_3_4_8, -4.3_7_6_9], [-5.3_6_2_9, -5.4_6_3_2, -5.4_5_9_8], [-5.1_5_8_7, -5.3_4_0_2, -5.5_0_5_9]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
__UpperCamelCase =torch.tensor(
[
[[5.4_4_4_9, 5.5_7_3_3, 5.6_3_1_4], [5.1_8_1_5, 5.3_9_3_0, 5.5_9_6_3], [5.1_6_5_6, 5.4_3_3_3, 5.4_8_5_3]],
[[-9.4_4_2_3, -9.7_7_6_6, -9.6_7_1_4], [-9.1_5_8_1, -9.5_7_2_0, -9.5_5_1_9], [-9.1_0_0_6, -9.6_4_5_8, -9.5_7_0_3]],
[[-7.7_7_2_1, -7.3_7_1_6, -7.1_5_8_3], [-8.4_5_9_9, -8.0_6_2_4, -7.7_9_4_4], [-8.4_1_7_2, -7.8_3_6_6, -7.5_0_2_5]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
__UpperCamelCase =torch.tensor(
[
[[6.9_8_1_1, 6.9_7_4_3, 7.3_1_2_3], [7.1_7_7_7, 7.1_9_3_1, 7.3_9_3_8], [7.5_6_3_3, 7.8_0_5_0, 7.8_9_0_1]],
[[-1_0.5_5_3_6, -1_0.2_3_3_2, -1_0.2_9_2_4], [-1_0.2_3_3_6, -9.8_6_2_4, -9.5_9_6_4], [-1_0.8_8_4_0, -1_0.8_1_5_8, -1_0.6_6_5_9]],
[[-3.4_9_3_8, -3.0_6_3_1, -2.8_6_2_0], [-3.4_2_0_5, -2.8_1_3_5, -2.6_8_7_5], [-3.4_1_7_9, -2.7_9_4_5, -2.8_7_5_0]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3, :3, :3] , __UpperCamelCase , atol=1E-4 )
else:
assert logits.shape == (1, 1_0_0_0)
if mobilevit_name == "mobilevit_s":
__UpperCamelCase =torch.tensor([-0.9_8_6_6, 0.2_3_9_2, -1.1_2_4_1] )
elif mobilevit_name == "mobilevit_xs":
__UpperCamelCase =torch.tensor([-2.4_7_6_1, -0.9_3_9_9, -1.9_5_8_7] )
elif mobilevit_name == "mobilevit_xxs":
__UpperCamelCase =torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3] , __UpperCamelCase , atol=1E-4 )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__UpperCamelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__UpperCamelCase )
if push_to_hub:
__UpperCamelCase ={
'''mobilevit_s''': '''mobilevit-small''',
'''mobilevit_xs''': '''mobilevit-x-small''',
'''mobilevit_xxs''': '''mobilevit-xx-small''',
'''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
'''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
'''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
}
print('''Pushing to the hub...''' )
__UpperCamelCase =model_mapping[mobilevit_name]
image_processor.push_to_hub(__UpperCamelCase , organization='''apple''' )
model.push_to_hub(__UpperCamelCase , organization='''apple''' )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__lowercase = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 296 | 0 |
"""simple docstring"""
import math
import qiskit
def lowerCamelCase_ (UpperCamelCase__ : Optional[int] = 1 , UpperCamelCase__ : int = 1 , UpperCamelCase__ : Optional[int] = 1 ):
if (
isinstance(__a , __a )
or isinstance(__a , __a )
or isinstance(__a , __a )
):
raise TypeError('''inputs must be integers.''' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('''inputs must be positive.''' )
if (
(math.floor(__a ) != input_a)
or (math.floor(__a ) != input_a)
or (math.floor(__a ) != carry_in)
):
raise ValueError('''inputs must be exact integers.''' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('''inputs must be less or equal to 2.''' )
# build registers
_UpperCAmelCase : Union[str, Any] = qiskit.QuantumRegister(4 , '''qr''' )
_UpperCAmelCase : Optional[Any] = qiskit.ClassicalRegister(2 , '''cr''' )
# list the entries
_UpperCAmelCase : int = [input_a, input_a, carry_in]
_UpperCAmelCase : str = qiskit.QuantumCircuit(__a , __a )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(__a ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(__a ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(__a ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , __a ) # measure the last two qbits
_UpperCAmelCase : Any = qiskit.Aer.get_backend('''aer_simulator''' )
_UpperCAmelCase : List[Any] = qiskit.execute(__a , __a , shots=1000 )
return job.result().get_counts(__a )
if __name__ == "__main__":
print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
| 506 |
import math
from datetime import datetime, timedelta
def UpperCamelCase_ ( __a ) -> datetime:
a__ : Union[str, Any] = year % 19
a__ : List[str] = year % 4
a__ : str = year % 7
a__ : Any = math.floor(year / 100 )
a__ : List[str] = math.floor((13 + 8 * leap_day_inhibits) / 25 )
a__ : Optional[int] = leap_day_inhibits / 4
a__ : Union[str, Any] = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
a__ : Dict = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
a__ : Any = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
a__ : List[Any] = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(__a , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(__a , 4 , 18 )
else:
return datetime(__a , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
UpperCamelCase : Tuple = """will be""" if year > datetime.now().year else """was"""
print(f"""Easter in {year} {tense} {gauss_easter(year)}""")
| 37 | 0 |
"""simple docstring"""
def UpperCAmelCase__ ( _UpperCAmelCase = 3 , _UpperCAmelCase = 7 , _UpperCAmelCase = 1000000 ):
"""simple docstring"""
A_ : Union[str, Any] = 0
A_ : Optional[Any] = 1
for current_denominator in range(1 , limit + 1 ):
A_ : List[Any] = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
A_ : List[str] = current_numerator
A_ : Optional[Any] = current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_00_00_00)) | 715 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class _UpperCAmelCase :
'''simple docstring'''
# setable values
lowercase_ : Optional[int] = None
lowercase_ : Optional[jnp.ndarray] = None
lowercase_ : Optional[jnp.ndarray] = None # sigma(t_i)
@classmethod
def lowerCamelCase_ ( cls ):
"""simple docstring"""
return cls()
@dataclass
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
lowercase_ : jnp.ndarray
lowercase_ : jnp.ndarray
lowercase_ : KarrasVeSchedulerState
class _UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return True
@register_to_config
def __init__( self , snake_case_ = 0.02 , snake_case_ = 1_0_0 , snake_case_ = 1.0_07 , snake_case_ = 8_0 , snake_case_ = 0.05 , snake_case_ = 5_0 , ):
"""simple docstring"""
pass
def lowerCamelCase_ ( self ):
"""simple docstring"""
return KarrasVeSchedulerState.create()
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ = () ):
"""simple docstring"""
A_ : int = jnp.arange(0 , snake_case_ )[::-1].copy()
A_ : Optional[Any] = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=snake_case_ , schedule=jnp.array(snake_case_ , dtype=jnp.floataa ) , timesteps=snake_case_ , )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
"""simple docstring"""
if self.config.s_min <= sigma <= self.config.s_max:
A_ : str = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
A_ : List[str] = 0
# sample eps ~ N(0, S_noise^2 * I)
A_ : Any = random.split(snake_case_ , num=1 )
A_ : Union[str, Any] = self.config.s_noise * random.normal(key=snake_case_ , shape=sample.shape )
A_ : str = sigma + gamma * sigma
A_ : List[Any] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = True , ):
"""simple docstring"""
A_ : Any = sample_hat + sigma_hat * model_output
A_ : str = (sample_hat - pred_original_sample) / sigma_hat
A_ : Optional[int] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=snake_case_ , derivative=snake_case_ , state=snake_case_ )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = True , ):
"""simple docstring"""
A_ : List[Any] = sample_prev + sigma_prev * model_output
A_ : Optional[Any] = (sample_prev - pred_original_sample) / sigma_prev
A_ : Tuple = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=snake_case_ , derivative=snake_case_ , state=snake_case_ )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
raise NotImplementedError() | 302 | 0 |
import datasets
from .evaluate import evaluate
_lowercase = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
_lowercase = '\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
_lowercase = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
def lowerCamelCase_ ( self : List[Any] , __a : List[Any] , __a : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__: Dict = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
lowerCamelCase__: Tuple = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
lowerCamelCase__: Optional[Any] = evaluate(dataset=__a , predictions=__a )
return score
| 306 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
_lowercase = 2_048
_lowercase = 4_096
_lowercase = 42
_lowercase = os.environ.pop('PROCESS_TRAIN', 'false')
_lowercase = {'null': 0, 'short': 1, 'long': 2, 'yes': 3, 'no': 4}
def __lowerCAmelCase ( _UpperCamelCase ) -> List[Any]:
'''simple docstring'''
def choose_first(_UpperCamelCase , _UpperCamelCase=False ):
assert isinstance(_UpperCamelCase , _UpperCamelCase )
if len(_UpperCamelCase ) == 1:
lowerCamelCase__: List[Any] = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
lowerCamelCase__: List[str] = {k: [a[k]] for k in a}
if len(a["""start_token"""] ) > 0:
break
return a
lowerCamelCase__: Any = {"""id""": example["""id"""]}
lowerCamelCase__: Union[str, Any] = example["""annotations"""]
lowerCamelCase__: Union[str, Any] = annotation["""yes_no_answer"""]
if 0 in yes_no_answer or 1 in yes_no_answer:
lowerCamelCase__: Tuple = ["""yes"""] if 1 in yes_no_answer else ["""no"""]
lowerCamelCase__: int = []
lowerCamelCase__: Optional[Any] = []
lowerCamelCase__: str = ["""<cls>"""]
else:
lowerCamelCase__: Dict = ["""short"""]
lowerCamelCase__: Tuple = choose_first(annotation["""short_answers"""] )
if len(out["""start_token"""] ) == 0:
# answer will be long if short is not available
lowerCamelCase__: Union[str, Any] = ["""long"""]
lowerCamelCase__: List[Any] = choose_first(annotation["""long_answer"""] , is_long_answer=_UpperCamelCase )
lowerCamelCase__: Tuple = []
answer.update(_UpperCamelCase )
# disregard some samples
if len(answer["""start_token"""] ) > 1 or answer["start_token"] == answer["end_token"]:
lowerCamelCase__: Optional[int] = True
else:
lowerCamelCase__: str = False
lowerCamelCase__: Union[str, Any] = ["""start_token""", """end_token""", """start_byte""", """end_byte""", """text"""]
if not all(isinstance(answer[k] , _UpperCamelCase ) for k in cols ):
raise ValueError("""Issue in ID""" , example["""id"""] )
return answer
def __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase=False ) -> List[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[int] = _get_single_answer(_UpperCamelCase )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
lowerCamelCase__: str = example["""document"""]["""tokens"""]
lowerCamelCase__: Dict = []
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
return {
"context": " ".join(_UpperCamelCase ),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
lowerCamelCase__: Union[str, Any] = ["""start_token""", """end_token"""]
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
lowerCamelCase__: List[Any] = example["""document"""]["""tokens"""]
lowerCamelCase__: Optional[int] = answer["""start_token"""]
lowerCamelCase__: int = answer["""end_token"""]
lowerCamelCase__: Any = []
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
lowerCamelCase__: Optional[Any] = """ """.join(context[start_token:end_token] )
# checking above code
if assertion:
lowerCamelCase__: Dict = doc["""is_html"""][answer["""start_token"""] : answer["""end_token"""]]
lowerCamelCase__: int = doc["""token"""][answer["""start_token"""] : answer["""end_token"""]]
lowerCamelCase__: Optional[Any] = """ """.join([old[i] for i in range(len(_UpperCamelCase ) ) if not is_html[i]] )
if new != old:
print("""ID:""" , example["""id"""] )
print("""New:""" , _UpperCamelCase , end="""\n""" )
print("""Old:""" , _UpperCamelCase , end="""\n\n""" )
return {
"context": " ".join(_UpperCamelCase ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=2048 , _UpperCamelCase=4096 , _UpperCamelCase=True ) -> str:
'''simple docstring'''
lowerCamelCase__: List[str] = get_context_and_ans(_UpperCamelCase , assertion=_UpperCamelCase )
lowerCamelCase__: Any = out["""answer"""]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
lowerCamelCase__: Tuple = tokenizer(example["""question"""]["""text"""] , out["""context"""] ).input_ids
lowerCamelCase__: int = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
lowerCamelCase__: Any = []
lowerCamelCase__: List[Any] = []
lowerCamelCase__: Union[str, Any] = input_ids[:q_len]
lowerCamelCase__: Optional[int] = range(_UpperCamelCase , len(_UpperCamelCase ) , max_length - doc_stride )
for i in doc_start_indices:
lowerCamelCase__: Optional[int] = i + max_length - q_len
lowerCamelCase__: Union[str, Any] = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer["""category"""][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(_UpperCamelCase ),
"end_token": [-100] * len(_UpperCamelCase ),
"category": category,
},
}
lowerCamelCase__: Optional[Any] = out["""context"""].split()
lowerCamelCase__: List[str] = splitted_context[answer["""end_token"""]]
lowerCamelCase__: str = len(
tokenizer(
""" """.join(splitted_context[: answer["""start_token"""]] ) , add_special_tokens=_UpperCamelCase , ).input_ids )
lowerCamelCase__: List[str] = len(
tokenizer(""" """.join(splitted_context[: answer["""end_token"""]] ) , add_special_tokens=_UpperCamelCase ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
lowerCamelCase__: List[Any] = len(tokenizer(_UpperCamelCase , add_special_tokens=_UpperCamelCase ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
lowerCamelCase__: Tuple = input_ids[answer["""start_token"""] : answer["""end_token"""] + 1] # right & left are inclusive
lowerCamelCase__: List[Any] = answer["""start_token"""]
lowerCamelCase__: Optional[int] = answer["""end_token"""]
if assertion:
lowerCamelCase__: str = tokenizer.decode(_UpperCamelCase )
if answer["span"] != new:
print("""ISSUE IN TOKENIZATION""" )
print("""OLD:""" , answer["""span"""] )
print("""NEW:""" , _UpperCamelCase , end="""\n\n""" )
if len(_UpperCamelCase ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
lowerCamelCase__: Dict = input_ids[:q_len]
lowerCamelCase__: Dict = range(_UpperCamelCase , len(_UpperCamelCase ) , max_length - doc_stride )
lowerCamelCase__: Tuple = []
lowerCamelCase__: Optional[int] = []
lowerCamelCase__: Any = []
lowerCamelCase__: Optional[int] = [] # null, yes, no, long, short
for i in doc_start_indices:
lowerCamelCase__: str = i + max_length - q_len
lowerCamelCase__: Any = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
lowerCamelCase__: Union[str, Any] = start_token - i + q_len
lowerCamelCase__: Dict = end_token - i + q_len
answers_category.append(answer["""category"""][0] ) # ["short"] -> "short"
else:
lowerCamelCase__: List[Any] = -100
lowerCamelCase__: str = -100
answers_category.append("""null""" )
lowerCamelCase__: Union[str, Any] = inputs[-1][start_token : end_token + 1]
answers_start_token.append(_UpperCamelCase )
answers_end_token.append(_UpperCamelCase )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print("""ISSUE in strided for ID:""" , example["""id"""] )
print("""New:""" , tokenizer.decode(_UpperCamelCase ) )
print("""Old:""" , tokenizer.decode(_UpperCamelCase ) , end="""\n\n""" )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=2048 , _UpperCamelCase=4096 , _UpperCamelCase=False ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase__: List[str] = get_strided_contexts_and_ans(
_UpperCamelCase , _UpperCamelCase , doc_stride=_UpperCamelCase , max_length=_UpperCamelCase , assertion=_UpperCamelCase , )
return example
def __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
with jsonlines.open(_UpperCamelCase , """a""" ) as writer:
for example in tqdm(_UpperCamelCase , total=len(_UpperCamelCase ) , desc="""Saving samples ... """ ):
lowerCamelCase__: int = example["""labels"""]
for ids, start, end, cat in zip(
example["""input_ids"""] , labels["""start_token"""] , labels["""end_token"""] , labels["""category"""] , ):
if start == -1 and end == -1:
continue # leave waste samples with no answer
if cat == "null" and np.random.rand() < 0.6:
continue # removing 50 % samples
writer.write(
{
"""input_ids""": ids,
"""start_token""": start,
"""end_token""": end,
"""category""": CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
_lowercase = load_dataset('natural_questions')
_lowercase = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
_lowercase = data['train' if PROCESS_TRAIN == 'true' else 'validation']
_lowercase = {
'tokenizer': tokenizer,
'doc_stride': DOC_STRIDE,
'max_length': MAX_LENGTH,
'assertion': False,
}
_lowercase = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
_lowercase = data.remove_columns(['annotations', 'document', 'id', 'question'])
print(data)
np.random.seed(SEED)
_lowercase = 'nq-training.jsonl' if PROCESS_TRAIN == 'true' else 'nq-validation.jsonl'
save_to_disk(data, file_name=cache_file_name)
| 306 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE__ : int = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : str = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[str] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 720 | import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def _a ( lowercase__ : int ):
'''simple docstring'''
if is_torch_version('<' , '2.0.0' ) or not hasattr(lowercase__ , '_dynamo' ):
return False
return isinstance(lowercase__ , torch._dynamo.eval_frame.OptimizedModule )
def _a ( lowercase__ : Optional[Any] , lowercase__ : bool = True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
SCREAMING_SNAKE_CASE__ : Dict = is_compiled_module(lowercase__ )
if is_compiled:
SCREAMING_SNAKE_CASE__ : Tuple = model
SCREAMING_SNAKE_CASE__ : int = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE__ : Any = model.module
if not keep_fpaa_wrapper:
SCREAMING_SNAKE_CASE__ : List[Any] = getattr(lowercase__ , 'forward' )
SCREAMING_SNAKE_CASE__ : str = model.__dict__.pop('_original_forward' , lowercase__ )
if original_forward is not None:
while hasattr(lowercase__ , '__wrapped__' ):
SCREAMING_SNAKE_CASE__ : Dict = forward.__wrapped__
if forward == original_forward:
break
SCREAMING_SNAKE_CASE__ : Dict = forward
if getattr(lowercase__ , '_converted_to_transformer_engine' , lowercase__ ):
convert_model(lowercase__ , to_transformer_engine=lowercase__ )
if is_compiled:
SCREAMING_SNAKE_CASE__ : List[Any] = model
SCREAMING_SNAKE_CASE__ : Optional[Any] = compiled_model
return model
def _a ( ):
'''simple docstring'''
PartialState().wait_for_everyone()
def _a ( lowercase__ : str , lowercase__ : Optional[Any] ):
'''simple docstring'''
if PartialState().distributed_type == DistributedType.TPU:
xm.save(lowercase__ , lowercase__ )
elif PartialState().local_process_index == 0:
torch.save(lowercase__ , lowercase__ )
@contextmanager
def _a ( **lowercase__ : str ):
'''simple docstring'''
for key, value in kwargs.items():
SCREAMING_SNAKE_CASE__ : int = str(lowercase__ )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def _a ( lowercase__ : Optional[Any] ):
'''simple docstring'''
if not hasattr(lowercase__ , '__qualname__' ) and not hasattr(lowercase__ , '__name__' ):
SCREAMING_SNAKE_CASE__ : Any = getattr(lowercase__ , '__class__' , lowercase__ )
if hasattr(lowercase__ , '__qualname__' ):
return obj.__qualname__
if hasattr(lowercase__ , '__name__' ):
return obj.__name__
return str(lowercase__ )
def _a ( lowercase__ : List[str] , lowercase__ : List[Any] ):
'''simple docstring'''
for key, value in source.items():
if isinstance(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE__ : List[str] = destination.setdefault(lowercase__ , {} )
merge_dicts(lowercase__ , lowercase__ )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = value
return destination
def _a ( lowercase__ : int = None ):
'''simple docstring'''
if port is None:
SCREAMING_SNAKE_CASE__ : int = 2_95_00
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
| 636 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class _lowerCAmelCase ( a_ ):
"""simple docstring"""
__magic_name__ :Optional[Any] = "xmod"
def __init__( self , __UpperCAmelCase=3_0_5_2_2 , __UpperCAmelCase=7_6_8 , __UpperCAmelCase=1_2 , __UpperCAmelCase=1_2 , __UpperCAmelCase=3_0_7_2 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_1_2 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase="absolute" , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=False , __UpperCAmelCase=2 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=("en_XX",) , __UpperCAmelCase=None , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
lowerCAmelCase__ :Optional[Any] = vocab_size
lowerCAmelCase__ :List[str] = hidden_size
lowerCAmelCase__ :int = num_hidden_layers
lowerCAmelCase__ :List[Any] = num_attention_heads
lowerCAmelCase__ :Any = hidden_act
lowerCAmelCase__ :str = intermediate_size
lowerCAmelCase__ :List[str] = hidden_dropout_prob
lowerCAmelCase__ :Optional[int] = attention_probs_dropout_prob
lowerCAmelCase__ :Any = max_position_embeddings
lowerCAmelCase__ :List[Any] = type_vocab_size
lowerCAmelCase__ :Tuple = initializer_range
lowerCAmelCase__ :Tuple = layer_norm_eps
lowerCAmelCase__ :Any = position_embedding_type
lowerCAmelCase__ :Any = use_cache
lowerCAmelCase__ :Dict = classifier_dropout
lowerCAmelCase__ :Any = pre_norm
lowerCAmelCase__ :Tuple = adapter_reduction_factor
lowerCAmelCase__ :List[Any] = adapter_layer_norm
lowerCAmelCase__ :str = adapter_reuse_layer_norm
lowerCAmelCase__ :int = ln_before_adapter
lowerCAmelCase__ :Union[str, Any] = list(lowerCamelCase_ )
lowerCAmelCase__ :Tuple = default_language
class _lowerCAmelCase ( a_ ):
"""simple docstring"""
@property
def snake_case ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCAmelCase__ :Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase__ :Any = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 93 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class lowercase ( a_ ):
"""simple docstring"""
_UpperCamelCase : Optional[Any] = "openai/whisper-base"
_UpperCamelCase : List[Any] = (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
_UpperCamelCase : Union[str, Any] = "transcriber"
_UpperCamelCase : Tuple = WhisperProcessor
_UpperCamelCase : Optional[Any] = WhisperForConditionalGeneration
_UpperCamelCase : Union[str, Any] = ["audio"]
_UpperCamelCase : Any = ["text"]
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
return self.pre_processor(lowerCamelCase_ , return_tensors='pt' ).input_features
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Any ):
'''simple docstring'''
return self.model.generate(inputs=lowerCamelCase_ )
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : Tuple ):
'''simple docstring'''
return self.pre_processor.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )[0]
| 304 | 0 |
from PIL import Image
def _lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
def brightness(SCREAMING_SNAKE_CASE ) -> float:
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
return img.point(__a )
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
__lowercase = change_brightness(img, 100)
brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
| 720 |
def _lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A_ = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def _lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A_ = 0
while b > 0:
if b & 1:
A_ = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
| 563 | 0 |
def snake_case (UpperCAmelCase__ ) -> list[int]:
UpperCamelCase_: int = len(SCREAMING_SNAKE_CASE_ )
for i in range(SCREAMING_SNAKE_CASE_ ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE_ ):
if numbers[j] < numbers[i]:
UpperCamelCase_ ,UpperCamelCase_: Union[str, Any] = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
A_ : Union[str, Any] = input('Enter numbers separated by a comma:\n').strip()
A_ : Optional[int] = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted)) | 57 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCamelCase__ : int = Dict[str, Any]
UpperCamelCase__ : Optional[Any] = List[Prediction]
@add_end_docstrings(_lowerCamelCase)
class _a (_lowerCamelCase):
"""simple docstring"""
def __init__( self , *A__ , **A__ ) -> List[Any]:
super().__init__(*A__ , **A__ )
if self.framework == "tf":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
requires_backends(self , """vision""" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def UpperCamelCase ( self , **A__ ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = {}
if "threshold" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""threshold"""]
return {}, {}, postprocess_kwargs
def __call__( self , *A__ , **A__ ) -> Union[Predictions, List[Prediction]]:
return super().__call__(*A__ , **A__ )
def UpperCamelCase ( self , A__ ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = load_image(A__ )
_SCREAMING_SNAKE_CASE = torch.IntTensor([[image.height, image.width]] )
_SCREAMING_SNAKE_CASE = self.image_processor(images=[image] , return_tensors="""pt""" )
if self.tokenizer is not None:
_SCREAMING_SNAKE_CASE = self.tokenizer(text=inputs["""words"""] , boxes=inputs["""boxes"""] , return_tensors="""pt""" )
_SCREAMING_SNAKE_CASE = target_size
return inputs
def UpperCamelCase ( self , A__ ) -> List[Any]:
_SCREAMING_SNAKE_CASE = model_inputs.pop("""target_size""" )
_SCREAMING_SNAKE_CASE = self.model(**A__ )
_SCREAMING_SNAKE_CASE = outputs.__class__({"""target_size""": target_size, **outputs} )
if self.tokenizer is not None:
_SCREAMING_SNAKE_CASE = model_inputs["""bbox"""]
return model_outputs
def UpperCamelCase ( self , A__ , A__=0.9 ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = model_outputs["""target_size"""]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = target_size[0].tolist()
def unnormalize(A__ ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 10_00),
(height * bbox[1] / 10_00),
(width * bbox[2] / 10_00),
(height * bbox[3] / 10_00),
] ) )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = model_outputs["""logits"""].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
_SCREAMING_SNAKE_CASE = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
_SCREAMING_SNAKE_CASE = [unnormalize(A__ ) for bbox in model_outputs["""bbox"""].squeeze(0 )]
_SCREAMING_SNAKE_CASE = ["""score""", """label""", """box"""]
_SCREAMING_SNAKE_CASE = [dict(zip(A__ , A__ ) ) for vals in zip(scores.tolist() , A__ , A__ ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
_SCREAMING_SNAKE_CASE = self.image_processor.post_process_object_detection(A__ , A__ , A__ )
_SCREAMING_SNAKE_CASE = raw_annotations[0]
_SCREAMING_SNAKE_CASE = raw_annotation["""scores"""]
_SCREAMING_SNAKE_CASE = raw_annotation["""labels"""]
_SCREAMING_SNAKE_CASE = raw_annotation["""boxes"""]
_SCREAMING_SNAKE_CASE = scores.tolist()
_SCREAMING_SNAKE_CASE = [self.model.config.idalabel[label.item()] for label in labels]
_SCREAMING_SNAKE_CASE = [self._get_bounding_box(A__ ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
_SCREAMING_SNAKE_CASE = ["""score""", """label""", """box"""]
_SCREAMING_SNAKE_CASE = [
dict(zip(A__ , A__ ) )
for vals in zip(raw_annotation["""scores"""] , raw_annotation["""labels"""] , raw_annotation["""boxes"""] )
]
return annotation
def UpperCamelCase ( self , A__ ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""" )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = box.int().tolist()
_SCREAMING_SNAKE_CASE = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
| 591 | 0 |
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
UpperCAmelCase_ =str(bin(lowercase__ ) )[2:] # remove the leading "0b"
UpperCAmelCase_ =str(bin(lowercase__ ) )[2:]
UpperCAmelCase_ =max(len(lowercase__ ) , len(lowercase__ ) )
return "0b" + "".join(
str(int("1" in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(lowercase__ ) , b_binary.zfill(lowercase__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 550 |
import sys
import turtle
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
def a__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
'''simple docstring'''
my_pen.up()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.down()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
if depth == 0:
return
triangle(lowercase__ , get_mid(lowercase__ , lowercase__ ) , get_mid(lowercase__ , lowercase__ ) , depth - 1 )
triangle(lowercase__ , get_mid(lowercase__ , lowercase__ ) , get_mid(lowercase__ , lowercase__ ) , depth - 1 )
triangle(lowercase__ , get_mid(lowercase__ , lowercase__ ) , get_mid(lowercase__ , lowercase__ ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"""Correct format for using this script: """
"""python fractals.py <int:depth_for_fractal>"""
)
__lowercase : Any =turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("""red""")
__lowercase : str =[(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 550 | 1 |
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
UpperCAmelCase_ : Tuple = '''\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
'''
UpperCAmelCase_ : Optional[Any] = '''\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
'''
UpperCAmelCase_ : int = '''
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"precision": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'precision@10\': 1.0}
'''
def _UpperCamelCase (_lowerCamelCase : Optional[int] , _lowerCamelCase : List[str] )-> str:
'''simple docstring'''
return float((preds == labels).mean() )
def _UpperCamelCase (_lowerCamelCase : Tuple , _lowerCamelCase : List[str] )-> Tuple:
'''simple docstring'''
__snake_case = simple_accuracy(_lowerCamelCase , _lowerCamelCase )
__snake_case = float(fa_score(y_true=_lowerCamelCase , y_pred=_lowerCamelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Optional[Any] )-> str:
'''simple docstring'''
__snake_case = np.array(_lowerCamelCase )
__snake_case = np.array(_lowerCamelCase )
__snake_case = en_sentvecs.shape[0]
# mean centering
__snake_case = en_sentvecs - np.mean(_lowerCamelCase , axis=0 )
__snake_case = in_sentvecs - np.mean(_lowerCamelCase , axis=0 )
__snake_case = cdist(_lowerCamelCase , _lowerCamelCase , '''cosine''' )
__snake_case = np.array(range(_lowerCamelCase ) )
__snake_case = sim.argsort(axis=1 )[:, :10]
__snake_case = np.any(preds == actual[:, None] , axis=1 )
return float(matches.mean() )
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCAmelCase ( datasets.Metric):
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
'''references''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
| 24 |
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
UpperCAmelCase__ : str = 4
UpperCAmelCase__ : Dict = 3
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
pass
def lowercase_ ( _snake_case ):
for shard in shards:
for i in range(_snake_case ):
yield {"i": i, "shard": shard}
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Tuple = int(os.environ["""RANK"""] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = int(os.environ["""WORLD_SIZE"""] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ArgumentParser()
parser.add_argument("""--streaming""" ,type=_snake_case )
parser.add_argument("""--local_rank""" ,type=_snake_case )
parser.add_argument("""--num_workers""" ,type=_snake_case ,default=0 )
SCREAMING_SNAKE_CASE__ : Any = parser.parse_args()
SCREAMING_SNAKE_CASE__ : int = args.streaming
SCREAMING_SNAKE_CASE__ : Union[str, Any] = args.num_workers
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"""shards""": [f'''shard_{shard_idx}''' for shard_idx in range(_snake_case )]}
SCREAMING_SNAKE_CASE__ : str = IterableDataset.from_generator(_snake_case ,gen_kwargs=_snake_case )
if not streaming:
SCREAMING_SNAKE_CASE__ : str = Dataset.from_list(list(_snake_case ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = split_dataset_by_node(_snake_case ,rank=_snake_case ,world_size=_snake_case )
SCREAMING_SNAKE_CASE__ : Tuple = torch.utils.data.DataLoader(_snake_case ,num_workers=_snake_case )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = NUM_SHARDS * NUM_ITEMS_PER_SHARD
SCREAMING_SNAKE_CASE__ : Optional[int] = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
SCREAMING_SNAKE_CASE__ : Tuple = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(f'''local_size {local_size} != expected_local_size {expected_local_size}''' )
if __name__ == "__main__":
main()
| 223 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a = {
"""configuration_blenderbot""": [
"""BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotConfig""",
"""BlenderbotOnnxConfig""",
],
"""tokenization_blenderbot""": ["""BlenderbotTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""BlenderbotTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotForCausalLM""",
"""BlenderbotForConditionalGeneration""",
"""BlenderbotModel""",
"""BlenderbotPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""TFBlenderbotForConditionalGeneration""",
"""TFBlenderbotModel""",
"""TFBlenderbotPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""FlaxBlenderbotForConditionalGeneration""",
"""FlaxBlenderbotModel""",
"""FlaxBlenderbotPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 720 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
return (data["data"], data["target"])
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = XGBClassifier()
classifier.fit(_lowercase , _lowercase )
return classifier
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = load_iris()
UpperCAmelCase_, UpperCAmelCase_ : Any = data_handling(_lowercase )
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Optional[int] = train_test_split(
_lowercase , _lowercase , test_size=0.25 )
UpperCAmelCase_ : Dict = iris['''target_names''']
# Create an XGBoost Classifier from the training data
UpperCAmelCase_ : int = xgboost(_lowercase , _lowercase )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
_lowercase , _lowercase , _lowercase , display_labels=_lowercase , cmap='''Blues''' , normalize='''true''' , )
plt.title('''Normalized Confusion Matrix - IRIS Dataset''' )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main() | 300 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase : List[str] = logging.get_logger(__name__)
lowercase : Dict = {"""tokenizer_file""": """tokenizer.json"""}
lowercase : Optional[Any] = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class __snake_case ( lowerCAmelCase ):
_a : str= VOCAB_FILES_NAMES
_a : Any= PRETRAINED_VOCAB_FILES_MAP
_a : int= ["input_ids", "attention_mask"]
_a : Optional[Any]= None
def __init__( self ,snake_case=None ,snake_case=None ,snake_case=None ,snake_case="<unk>" ,snake_case="<s>" ,snake_case="</s>" ,snake_case="<pad>" ,snake_case=False ,snake_case=False ,**snake_case ,):
'''simple docstring'''
super().__init__(
snake_case ,snake_case ,tokenizer_file=snake_case ,unk_token=snake_case ,bos_token=snake_case ,eos_token=snake_case ,pad_token=snake_case ,add_prefix_space=snake_case ,clean_up_tokenization_spaces=snake_case ,**snake_case ,)
lowercase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" ,snake_case ) != add_prefix_space:
lowercase : Tuple = getattr(snake_case ,pre_tok_state.pop("""type""" ) )
lowercase : Tuple = add_prefix_space
lowercase : Optional[Any] = pre_tok_class(**snake_case )
lowercase : Union[str, Any] = add_prefix_space
def _SCREAMING_SNAKE_CASE ( self ,*snake_case ,**snake_case ):
'''simple docstring'''
lowercase : Optional[Any] = kwargs.get("""is_split_into_words""" ,snake_case )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
""" pretokenized inputs.""" )
return super()._batch_encode_plus(*snake_case ,**snake_case )
def _SCREAMING_SNAKE_CASE ( self ,*snake_case ,**snake_case ):
'''simple docstring'''
lowercase : int = kwargs.get("""is_split_into_words""" ,snake_case )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
""" pretokenized inputs.""" )
return super()._encode_plus(*snake_case ,**snake_case )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = None ):
'''simple docstring'''
lowercase : Any = self._tokenizer.model.save(snake_case ,name=snake_case )
return tuple(snake_case )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : Optional[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(snake_case ,add_special_tokens=snake_case ) + [self.eos_token_id] )
if len(snake_case ) > self.model_max_length:
lowercase : Optional[int] = input_ids[-self.model_max_length :]
return input_ids
| 336 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
lowercase : Dict = logging.get_logger(__name__)
lowercase : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all BART models at https://huggingface.co/models?filter=bart
lowercase : Optional[Any] = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json""",
},
}
lowercase : Tuple = {
"""facebook/bart-base""": 1024,
"""facebook/bart-large""": 1024,
"""facebook/bart-large-mnli""": 1024,
"""facebook/bart-large-cnn""": 1024,
"""facebook/bart-large-xsum""": 1024,
"""yjernite/bart_eli5""": 1024,
}
class __snake_case ( lowerCAmelCase ):
_a : Tuple= VOCAB_FILES_NAMES
_a : Any= PRETRAINED_VOCAB_FILES_MAP
_a : Union[str, Any]= PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : List[str]= ["input_ids", "attention_mask"]
_a : List[Any]= BartTokenizer
def __init__( self ,snake_case=None ,snake_case=None ,snake_case=None ,snake_case="replace" ,snake_case="<s>" ,snake_case="</s>" ,snake_case="</s>" ,snake_case="<s>" ,snake_case="<unk>" ,snake_case="<pad>" ,snake_case="<mask>" ,snake_case=False ,snake_case=True ,**snake_case ,):
'''simple docstring'''
super().__init__(
snake_case ,snake_case ,tokenizer_file=snake_case ,errors=snake_case ,bos_token=snake_case ,eos_token=snake_case ,sep_token=snake_case ,cls_token=snake_case ,unk_token=snake_case ,pad_token=snake_case ,mask_token=snake_case ,add_prefix_space=snake_case ,trim_offsets=snake_case ,**snake_case ,)
lowercase : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" ,snake_case ) != add_prefix_space:
lowercase : Union[str, Any] = getattr(snake_case ,pre_tok_state.pop("""type""" ) )
lowercase : Dict = add_prefix_space
lowercase : List[str] = pre_tok_class(**snake_case )
lowercase : List[Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase : int = """post_processor"""
lowercase : List[Any] = getattr(self.backend_tokenizer ,snake_case ,snake_case )
if tokenizer_component_instance:
lowercase : List[Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase : Dict = tuple(state["""sep"""] )
if "cls" in state:
lowercase : Dict = tuple(state["""cls"""] )
lowercase : Union[str, Any] = False
if state.get("""add_prefix_space""" ,snake_case ) != add_prefix_space:
lowercase : List[Any] = add_prefix_space
lowercase : List[str] = True
if state.get("""trim_offsets""" ,snake_case ) != trim_offsets:
lowercase : Optional[int] = trim_offsets
lowercase : Dict = True
if changes_to_apply:
lowercase : int = getattr(snake_case ,state.pop("""type""" ) )
lowercase : Any = component_class(**snake_case )
setattr(self.backend_tokenizer ,snake_case ,snake_case )
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : str = AddedToken(snake_case ,lstrip=snake_case ,rstrip=snake_case ) if isinstance(snake_case ,snake_case ) else value
lowercase : Optional[Any] = value
def _SCREAMING_SNAKE_CASE ( self ,*snake_case ,**snake_case ):
'''simple docstring'''
lowercase : Union[str, Any] = kwargs.get("""is_split_into_words""" ,snake_case )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*snake_case ,**snake_case )
def _SCREAMING_SNAKE_CASE ( self ,*snake_case ,**snake_case ):
'''simple docstring'''
lowercase : Union[str, Any] = kwargs.get("""is_split_into_words""" ,snake_case )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*snake_case ,**snake_case )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = None ):
'''simple docstring'''
lowercase : Union[str, Any] = self._tokenizer.model.save(snake_case ,name=snake_case )
return tuple(snake_case )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case=None ):
'''simple docstring'''
lowercase : str = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = None ):
'''simple docstring'''
lowercase : Union[str, Any] = [self.sep_token_id]
lowercase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 336 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ ( unittest.TestCase ):
def __init__( self : Any , _lowercase : Dict , _lowercase : Any=7 , _lowercase : int=3 , _lowercase : List[Any]=1_8 , _lowercase : str=3_0 , _lowercase : Tuple=4_0_0 , _lowercase : Optional[Any]=True , _lowercase : str=None , _lowercase : str=True , _lowercase : List[str]=None , _lowercase : int=True , ) -> List[str]:
_lowercase = size if size is not None else {"shortest_edge": 2_0}
_lowercase = crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
_lowercase = parent
_lowercase = batch_size
_lowercase = num_channels
_lowercase = image_size
_lowercase = min_resolution
_lowercase = max_resolution
_lowercase = do_resize
_lowercase = size
_lowercase = do_center_crop
_lowercase = crop_size
_lowercase = do_flip_channel_order
def _lowerCamelCase ( self : Any ) -> List[str]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class UpperCAmelCase_ ( snake_case__, unittest.TestCase ):
snake_case_ = MobileViTImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self : int ) -> Dict:
_lowercase = MobileViTImageProcessingTester(self )
@property
def _lowerCamelCase ( self : str ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self : int ) -> Union[str, Any]:
_lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , "do_resize" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "size" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "do_center_crop" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "center_crop" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "do_flip_channel_order" ) )
def _lowerCamelCase ( self : Any ) -> int:
_lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 2_0} )
self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} )
_lowercase = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
def _lowerCamelCase ( self : Union[str, Any] ) -> Dict:
pass
def _lowerCamelCase ( self : Union[str, Any] ) -> Optional[int]:
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
_lowercase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_lowercase = image_processing(UpperCAmelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowerCamelCase ( self : Tuple ) -> int:
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input
_lowercase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_lowercase = image_processing(UpperCAmelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowerCamelCase ( self : str ) -> str:
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input
_lowercase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_lowercase = image_processing(UpperCAmelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , ) | 702 | """simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__UpperCamelCase : Union[str, Any] = 1_6
__UpperCamelCase : Optional[int] = 3_2
def __UpperCAmelCase ( _snake_case : Accelerator, _snake_case : int = 1_6, _snake_case : str = "bert-base-cased" ):
_lowercase = AutoTokenizer.from_pretrained(_snake_case )
_lowercase = load_dataset("glue", "mrpc" )
def tokenize_function(_snake_case : List[str] ):
# max_length=None => use the model max length (it's actually the default)
_lowercase = tokenizer(examples["sentence1"], examples["sentence2"], truncation=_snake_case, max_length=_snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_lowercase = datasets.map(
_snake_case, batched=_snake_case, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=_snake_case )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_lowercase = tokenized_datasets.rename_column("label", "labels" )
def collate_fn(_snake_case : Tuple ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_snake_case, padding="max_length", max_length=1_2_8, return_tensors="pt" )
return tokenizer.pad(_snake_case, padding="longest", return_tensors="pt" )
# Instantiate dataloaders.
_lowercase = DataLoader(
tokenized_datasets["train"], shuffle=_snake_case, collate_fn=_snake_case, batch_size=_snake_case )
_lowercase = DataLoader(
tokenized_datasets["validation"], shuffle=_snake_case, collate_fn=_snake_case, batch_size=_snake_case )
return train_dataloader, eval_dataloader
def __UpperCAmelCase ( _snake_case : Union[str, Any], _snake_case : Tuple ):
# Initialize accelerator
_lowercase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowercase = config["lr"]
_lowercase = int(config["num_epochs"] )
_lowercase = int(config["seed"] )
_lowercase = int(config["batch_size"] )
_lowercase = args.model_name_or_path
set_seed(_snake_case )
_lowercase , _lowercase = get_dataloaders(_snake_case, _snake_case, _snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowercase = AutoModelForSequenceClassification.from_pretrained(_snake_case, return_dict=_snake_case )
# Instantiate optimizer
_lowercase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_lowercase = optimizer_cls(params=model.parameters(), lr=_snake_case )
if accelerator.state.deepspeed_plugin is not None:
_lowercase = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
_lowercase = 1
_lowercase = (len(_snake_case ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_lowercase = get_linear_schedule_with_warmup(
optimizer=_snake_case, num_warmup_steps=0, num_training_steps=_snake_case, )
else:
_lowercase = DummyScheduler(_snake_case, total_num_steps=_snake_case, warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase = accelerator.prepare(
_snake_case, _snake_case, _snake_case, _snake_case, _snake_case )
# We need to keep track of how many total steps we have iterated over
_lowercase = 0
# We also need to keep track of the stating epoch so files are named properly
_lowercase = 0
# Now we train the model
_lowercase = evaluate.load("glue", "mrpc" )
_lowercase = 0
_lowercase = {}
for epoch in range(_snake_case, _snake_case ):
model.train()
for step, batch in enumerate(_snake_case ):
_lowercase = model(**_snake_case )
_lowercase = outputs.loss
_lowercase = loss / gradient_accumulation_steps
accelerator.backward(_snake_case )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
_lowercase = 0
for step, batch in enumerate(_snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_lowercase = model(**_snake_case )
_lowercase = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
_lowercase , _lowercase = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(_snake_case ) - 1:
_lowercase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_lowercase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=_snake_case, references=_snake_case, )
_lowercase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""", _snake_case )
_lowercase = eval_metric["accuracy"]
if best_performance < eval_metric["accuracy"]:
_lowercase = eval_metric["accuracy"]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir, "all_results.json" ), "w" ) as f:
json.dump(_snake_case, _snake_case )
def __UpperCAmelCase ( ):
_lowercase = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path", type=_snake_case, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=_snake_case, )
parser.add_argument(
"--output_dir", type=_snake_case, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
parser.add_argument(
"--performance_lower_bound", type=_snake_case, default=_snake_case, help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.", )
parser.add_argument(
"--num_epochs", type=_snake_case, default=3, help="Number of train epochs.", )
_lowercase = parser.parse_args()
_lowercase = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 4_2, "batch_size": 1_6}
training_function(_snake_case, _snake_case )
if __name__ == "__main__":
main() | 227 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase : Optional[Any] = 16
lowerCAmelCase : Optional[Any] = 32
def A_ ( a , a = 1_6 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = AutoTokenizer.from_pretrained('bert-base-cased' )
SCREAMING_SNAKE_CASE_ : int = load_dataset('glue' , 'mrpc' )
def tokenize_function(a ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_ : int = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=a , max_length=a )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE_ : List[Any] = datasets.map(
a , batched=a , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(a ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE_ : Optional[int] = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 1_6
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE_ : Any = 8
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
return tokenizer.pad(
a , padding='longest' , max_length=a , pad_to_multiple_of=a , return_tensors='pt' , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_ : int = DataLoader(
tokenized_datasets['train'] , shuffle=a , collate_fn=a , batch_size=a )
SCREAMING_SNAKE_CASE_ : List[str] = DataLoader(
tokenized_datasets['validation'] , shuffle=a , collate_fn=a , batch_size=a )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase : List[Any] = mocked_dataloaders # noqa: F811
def A_ ( a , a ):
"""simple docstring"""
if os.environ.get('TESTING_MOCKED_DATALOADERS' , a ) == "1":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 2
# Initialize accelerator
SCREAMING_SNAKE_CASE_ : Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE_ : List[str] = config['lr']
SCREAMING_SNAKE_CASE_ : str = int(config['num_epochs'] )
SCREAMING_SNAKE_CASE_ : Any = int(config['seed'] )
SCREAMING_SNAKE_CASE_ : Optional[int] = int(config['batch_size'] )
SCREAMING_SNAKE_CASE_ : str = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
SCREAMING_SNAKE_CASE_ : List[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
SCREAMING_SNAKE_CASE_ : Optional[int] = batch_size // MAX_GPU_BATCH_SIZE
SCREAMING_SNAKE_CASE_ : Union[str, Any] = MAX_GPU_BATCH_SIZE
set_seed(a )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = get_dataloaders(a , a )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE_ : Tuple = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=a )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE_ : Any = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE_ : List[str] = AdamW(params=model.parameters() , lr=a )
# Instantiate scheduler
SCREAMING_SNAKE_CASE_ : Optional[Any] = get_linear_schedule_with_warmup(
optimizer=a , num_warmup_steps=1_0_0 , num_training_steps=(len(a ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = accelerator.prepare(
a , a , a , a , a )
# Now we train the model
for epoch in range(a ):
model.train()
for step, batch in enumerate(a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
SCREAMING_SNAKE_CASE_ : List[Any] = model(**a )
SCREAMING_SNAKE_CASE_ : Optional[int] = outputs.loss
SCREAMING_SNAKE_CASE_ : Optional[Any] = loss / gradient_accumulation_steps
accelerator.backward(a )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
SCREAMING_SNAKE_CASE_ : Any = 0
for step, batch in enumerate(a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[str] = model(**a )
SCREAMING_SNAKE_CASE_ : Optional[int] = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = accelerator.gather((predictions, batch['labels']) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(a ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
SCREAMING_SNAKE_CASE_ : Optional[int] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
SCREAMING_SNAKE_CASE_ : Optional[int] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=a , references=a , )
SCREAMING_SNAKE_CASE_ : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:" , a )
def A_ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=a , default=a , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
SCREAMING_SNAKE_CASE_ : List[Any] = parser.parse_args()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {'lr': 2e-5, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 1_6}
training_function(a , a )
if __name__ == "__main__":
main()
| 511 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase : Optional[Any] = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int = ['MobileBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 511 | 1 |
def lowerCAmelCase_ ( __UpperCAmelCase: int ) -> bool:
if p < 2:
raise ValueError('''p should not be less than 2!''' )
elif p == 2:
return True
UpperCamelCase__ : Any = 4
UpperCamelCase__ : Any = (1 << p) - 1
for _ in range(p - 2 ):
UpperCamelCase__ : str = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 369 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self, __magic_name__, __magic_name__=2, __magic_name__=56, __magic_name__=True, __magic_name__=True, __magic_name__=True, __magic_name__=True, __magic_name__=99, __magic_name__=32, __magic_name__=2, __magic_name__=2, __magic_name__=7, __magic_name__="gelu_new", __magic_name__=0.1, __magic_name__=0.1, __magic_name__=512, __magic_name__=16, __magic_name__=2, __magic_name__=0.02, __magic_name__=4, __magic_name__="block_sparse", __magic_name__=True, __magic_name__=False, __magic_name__=2, __magic_name__=3, ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : List[str] = parent
UpperCamelCase__ : List[Any] = batch_size
UpperCamelCase__ : str = seq_length
UpperCamelCase__ : List[str] = is_training
UpperCamelCase__ : List[Any] = use_attention_mask
UpperCamelCase__ : Any = use_token_type_ids
UpperCamelCase__ : Tuple = use_labels
UpperCamelCase__ : Any = vocab_size
UpperCamelCase__ : Optional[int] = hidden_size
UpperCamelCase__ : Union[str, Any] = num_hidden_layers
UpperCamelCase__ : Optional[Any] = num_attention_heads
UpperCamelCase__ : List[str] = intermediate_size
UpperCamelCase__ : List[str] = hidden_act
UpperCamelCase__ : Union[str, Any] = hidden_dropout_prob
UpperCamelCase__ : Union[str, Any] = attention_probs_dropout_prob
UpperCamelCase__ : int = max_position_embeddings
UpperCamelCase__ : List[Any] = type_vocab_size
UpperCamelCase__ : Tuple = type_sequence_label_size
UpperCamelCase__ : Optional[Any] = initializer_range
UpperCamelCase__ : Dict = num_choices
UpperCamelCase__ : Dict = rescale_embeddings
UpperCamelCase__ : Tuple = attention_type
UpperCamelCase__ : Union[str, Any] = use_bias
UpperCamelCase__ : Union[str, Any] = block_size
UpperCamelCase__ : Optional[int] = num_random_blocks
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
UpperCamelCase__ : int = None
if self.use_attention_mask:
UpperCamelCase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ : Optional[int] = None
if self.use_token_type_ids:
UpperCamelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
UpperCamelCase__ : str = BigBirdConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=__magic_name__, initializer_range=self.initializer_range, attention_type=self.attention_type, block_size=self.block_size, num_random_blocks=self.num_random_blocks, use_bias=self.use_bias, rescale_embeddings=self.rescale_embeddings, )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase__ : List[str] = self.prepare_config_and_inputs()
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Tuple = config_and_inputs
UpperCamelCase__ : Union[str, Any] = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_flax
class lowercase__ ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
a : Any = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
a : List[str] = False
a : str = False
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
super().test_hidden_states_output()
@slow
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
UpperCamelCase__ : List[str] = model_class_name.from_pretrained('''google/bigbird-roberta-base''' )
self.assertIsNotNone(__magic_name__ )
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase__ : Union[str, Any] = self._prepare_for_class(__magic_name__, __magic_name__ )
UpperCamelCase__ : Tuple = model_class(__magic_name__ )
@jax.jit
def model_jitted(__magic_name__, __magic_name__=None, **__magic_name__ ):
return model(input_ids=__magic_name__, attention_mask=__magic_name__, **__magic_name__ )
with self.subTest('''JIT Enabled''' ):
UpperCamelCase__ : str = model_jitted(**__magic_name__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCamelCase__ : Dict = model_jitted(**__magic_name__ ).to_tuple()
self.assertEqual(len(__magic_name__ ), len(__magic_name__ ) )
for jitted_output, output in zip(__magic_name__, __magic_name__ ):
self.assertEqual(jitted_output.shape, output.shape )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__, __magic_name__=1E-5, __magic_name__="outputs", __magic_name__=None ) -> List[Any]:
"""simple docstring"""
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
# an effort was done to return `attention_probs` (yet to be verified).
if name.startswith('''outputs.attentions''' ):
return
else:
super().check_pt_flax_outputs(__magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__ )
| 369 | 1 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
UpperCAmelCase__ : List[str] = TypeVar("T")
class __lowercase ( Generic[T] ):
def __init__( self , lowercase_) -> List[Any]:
__snake_case = data
__snake_case = None
def __str__( self) -> str:
return F"{self.data}"
class __lowercase ( Generic[T] ):
def __init__( self) -> None:
__snake_case = None
def __iter__( self) -> Iterator[T]:
__snake_case = self.top
while node:
yield node.data
__snake_case = node.next
def __str__( self) -> str:
return "->".join([str(lowercase_) for item in self])
def __len__( self) -> int:
return len(tuple(iter(self)))
def _a ( self) -> bool:
return self.top is None
def _a ( self , lowercase_) -> None:
__snake_case = Node(lowercase_)
if not self.is_empty():
__snake_case = self.top
__snake_case = node
def _a ( self) -> T:
if self.is_empty():
raise IndexError('pop from empty stack')
assert isinstance(self.top , lowercase_)
__snake_case = self.top
__snake_case = self.top.next
return pop_node.data
def _a ( self) -> T:
if self.is_empty():
raise IndexError('peek from empty stack')
assert self.top is not None
return self.top.data
def _a ( self) -> None:
__snake_case = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 313 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
UpperCAmelCase__ : Union[str, Any] = logging.getLogger(__name__)
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=None) -> List[Any]:
super().__init__(
lowercase_ , question_encoder_tokenizer=lowercase_ , generator_tokenizer=lowercase_ , index=lowercase_ , init_retrieval=lowercase_ , )
__snake_case = None
def _a ( self , lowercase_) -> Union[str, Any]:
logger.info('initializing retrieval')
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info('dist initialized')
# needs to be set manually
__snake_case = self._infer_socket_ifname()
# avoid clash with the NCCL port
__snake_case = str(distributed_port + 1)
__snake_case = dist.new_group(ranks=lowercase_ , backend='gloo')
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info('dist not initialized / main')
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group)
def _a ( self) -> int:
return dist.get_rank(group=self.process_group) == 0
def _a ( self , lowercase_ , lowercase_ , lowercase_=torch.floataa) -> Dict:
__snake_case = torch.empty(lowercase_ , dtype=lowercase_)
dist.scatter(lowercase_ , src=0 , scatter_list=lowercase_ , group=self.process_group)
return target_tensor
def _a ( self) -> str:
__snake_case = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
__snake_case = next((addr for addr in addrs if addr.startswith('e')) , lowercase_)
return ifname
def _a ( self , lowercase_ , lowercase_) -> Tuple[np.ndarray, List[dict]]:
# single GPU training
if not dist.is_initialized():
__snake_case , __snake_case = self._main_retrieve(lowercase_ , lowercase_)
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowercase_)
# distributed training
__snake_case = dist.get_world_size(group=self.process_group)
# gather logic
__snake_case = None
if self._is_main():
__snake_case = [torch.empty(question_hidden_states.shape , dtype=torch.floataa) for _ in range(lowercase_)]
dist.gather(torch.tensor(lowercase_) , dst=0 , gather_list=lowercase_ , group=self.process_group)
# scatter logic
__snake_case = question_hidden_states.shape[0]
__snake_case = []
__snake_case = []
if self._is_main():
assert len(lowercase_) == world_size
__snake_case , __snake_case = self._main_retrieve(torch.cat(lowercase_).numpy() , lowercase_)
__snake_case , __snake_case = torch.tensor(lowercase_), torch.tensor(lowercase_)
__snake_case = self._chunk_tensor(lowercase_ , lowercase_)
__snake_case = self._chunk_tensor(lowercase_ , lowercase_)
__snake_case = self._scattered(lowercase_ , [n_queries, n_docs] , target_type=torch.intaa)
__snake_case = self._scattered(lowercase_ , [n_queries, n_docs, question_hidden_states.shape[1]])
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(lowercase_)
| 313 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_lowercase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ["""BartphoTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
_lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 701 |
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( UpperCAmelCase__ ):
lowercase_ : int = ['''image_processor''', '''tokenizer''']
lowercase_ : Dict = '''AutoImageProcessor'''
lowercase_ : Dict = '''AutoTokenizer'''
def __init__( self : int , __lowerCAmelCase : Any=None , __lowerCAmelCase : Tuple=None , **__lowerCAmelCase : List[Any] ):
__snake_case = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __lowerCAmelCase , )
__snake_case = kwargs.pop('feature_extractor' )
__snake_case = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
__snake_case = self.image_processor
__snake_case = False
def __call__( self : Dict , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : List[str] ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase )
__snake_case = kwargs.pop('images' , __lowerCAmelCase )
__snake_case = kwargs.pop('text' , __lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
__snake_case = args[0]
__snake_case = args[1:]
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
__snake_case = self.image_processor(__lowerCAmelCase , *__lowerCAmelCase , **__lowerCAmelCase )
if text is not None:
__snake_case = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
__snake_case = encodings['input_ids']
return inputs
def lowercase__ ( self : str , *__lowerCAmelCase : str , **__lowerCAmelCase : Tuple ):
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def lowercase__ ( self : Union[str, Any] , *__lowerCAmelCase : Dict , **__lowerCAmelCase : Any ):
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@contextmanager
def lowercase__ ( self : Any ):
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your images inputs, or in a separate call.' )
__snake_case = True
__snake_case = self.tokenizer
yield
__snake_case = self.image_processor
__snake_case = False
def lowercase__ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : List[Any]=None ):
if added_vocab is None:
__snake_case = self.tokenizer.get_added_vocab()
__snake_case = {}
while tokens:
__snake_case = re.search(r'<s_(.*?)>' , __lowerCAmelCase , re.IGNORECASE )
if start_token is None:
break
__snake_case = start_token.group(1 )
__snake_case = re.search(rF'</s_{key}>' , __lowerCAmelCase , re.IGNORECASE )
__snake_case = start_token.group()
if end_token is None:
__snake_case = tokens.replace(__lowerCAmelCase , '' )
else:
__snake_case = end_token.group()
__snake_case = re.escape(__lowerCAmelCase )
__snake_case = re.escape(__lowerCAmelCase )
__snake_case = re.search(F'{start_token_escaped}(.*?){end_token_escaped}' , __lowerCAmelCase , re.IGNORECASE )
if content is not None:
__snake_case = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
__snake_case = self.tokenajson(__lowerCAmelCase , is_inner_value=__lowerCAmelCase , added_vocab=__lowerCAmelCase )
if value:
if len(__lowerCAmelCase ) == 1:
__snake_case = value[0]
__snake_case = value
else: # leaf nodes
__snake_case = []
for leaf in content.split(r'<sep/>' ):
__snake_case = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
__snake_case = leaf[1:-2] # for categorical special tokens
output[key].append(__lowerCAmelCase )
if len(output[key] ) == 1:
__snake_case = output[key][0]
__snake_case = tokens[tokens.find(__lowerCAmelCase ) + len(__lowerCAmelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=__lowerCAmelCase , added_vocab=__lowerCAmelCase )
if len(__lowerCAmelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def lowercase__ ( self : Dict ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __lowerCAmelCase , )
return self.image_processor_class
@property
def lowercase__ ( self : Union[str, Any] ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __lowerCAmelCase , )
return self.image_processor
| 427 | 0 |
def SCREAMING_SNAKE_CASE_ ( _snake_case :int = 100 ) -> int:
_A = set()
_A = 0
_A = n + 1 # maximum limit
for a in range(2 , _snake_case ):
for b in range(2 , _snake_case ):
_A = a**b # calculates the current power
collect_powers.add(_snake_case ) # adds the result to the set
return len(_snake_case )
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
| 2 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 2 | 1 |
def _A (lowerCAmelCase__ :str , lowerCAmelCase__ :int ) -> list[str]:
'''simple docstring'''
return [sentence[i : i + ngram_size] for i in range(len(lowerCAmelCase__ ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 721 |
'''simple docstring'''
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def _A (lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Optional[int]=0 ) -> Optional[int]:
'''simple docstring'''
if name is None:
_a = None
else:
_a = '.' * max(0 , spaces - 2 ) + '# {:' + str(50 - spaces ) + 's}'
_a = fmt.format(lowerCAmelCase__ )
# Print and recurse (if needed).
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
if msg is not None:
print(lowerCAmelCase__ )
for k in val.keys():
recursive_print(lowerCAmelCase__ , val[k] , spaces + 2 )
elif isinstance(lowerCAmelCase__ , torch.Tensor ):
print(lowerCAmelCase__ , ':' , val.size() )
else:
print(lowerCAmelCase__ , ':' , lowerCAmelCase__ )
def _A (lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] ) -> Any:
'''simple docstring'''
_a = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
_a = (num_heads, hidden_size, num_splits) + input_shape[1:]
_a = param.view(*lowerCAmelCase__ )
_a = param.transpose(0 , 2 )
_a = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
_a = (num_heads, num_splits, hidden_size) + input_shape[1:]
_a = param.view(*lowerCAmelCase__ )
_a = param.transpose(0 , 1 ).contiguous()
_a = param.view(*lowerCAmelCase__ )
return param
def _A (lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] ) -> Tuple:
'''simple docstring'''
_a = {}
# old versions did not store training args
_a = input_state_dict.get('args' , lowerCAmelCase__ )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
_a = ds_args.padded_vocab_size
_a = ds_args.max_position_embeddings
_a = ds_args.hidden_size
_a = ds_args.num_layers
_a = ds_args.num_attention_heads
_a = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
_a = config.n_head
# The hidden_size per head.
_a = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
_a = input_state_dict['checkpoint_version']
else:
_a = 0.0
# The model.
_a = input_state_dict['model']
# The language model.
_a = model['language_model']
# The embeddings.
_a = lm['embedding']
# The word embeddings.
_a = embeddings['word_embeddings']['weight']
# Truncate the embedding table to vocab_size rows.
_a = word_embeddings[: config.vocab_size, :]
_a = word_embeddings
# The position embeddings.
_a = embeddings['position_embeddings']['weight']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
_a = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f'pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match' )
# Store the position embeddings.
_a = pos_embeddings
# The transformer.
_a = lm['transformer'] if 'transformer' in lm.keys() else lm['encoder']
# The regex to extract layer names.
_a = re.compile(r'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' )
# The simple map of names for "automated" rules.
_a = {
'attention.dense': '.attn.c_proj.',
'self_attention.dense': '.attn.c_proj.',
'mlp.dense_h_to_4h': '.mlp.c_fc.',
'mlp.dense_4h_to_h': '.mlp.c_proj.',
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
_a = layer_re.match(lowerCAmelCase__ )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
_a = int(m.group(1 ) )
# The name of the operation.
_a = m.group(2 )
# Is it a weight or a bias?
_a = m.group(3 )
# The name of the layer.
_a = f'transformer.h.{layer_idx}'
# For layernorm(s), simply store the layer norm.
if op_name.endswith('layernorm' ):
_a = 'ln_1' if op_name.startswith('input' ) else 'ln_2'
_a = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
_a = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , lowerCAmelCase__ , lowerCAmelCase__ )
_a = causal_mask
# Insert a "dummy" tensor for masked_bias.
_a = torch.tensor(-1E4 , dtype=torch.floataa )
_a = masked_bias
_a = fix_query_key_value_ordering(lowerCAmelCase__ , lowerCAmelCase__ , 3 , lowerCAmelCase__ , lowerCAmelCase__ )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
_a = out_val.transpose(0 , 1 ).contiguous()
# Store.
_a = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
_a = fix_query_key_value_ordering(lowerCAmelCase__ , lowerCAmelCase__ , 3 , lowerCAmelCase__ , lowerCAmelCase__ )
# Store. No change of shape.
_a = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
_a = megatron_to_transformers[op_name]
_a = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
_a = megatron_to_transformers[op_name]
_a = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
_a = transformer['final_layernorm.weight']
_a = transformer['final_layernorm.bias']
# For LM head, transformers' wants the matrix to weight embeddings.
_a = word_embeddings
# It should be done!
return output_state_dict
def _A () -> Any:
'''simple docstring'''
_a = argparse.ArgumentParser()
parser.add_argument('--print-checkpoint-structure' , action='store_true' )
parser.add_argument(
'path_to_checkpoint' , type=lowerCAmelCase__ , help='Path to the checkpoint file (.zip archive or direct .pt file)' , )
parser.add_argument(
'--config_file' , default='' , type=lowerCAmelCase__ , help='An optional config json file describing the pre-trained model.' , )
_a = parser.parse_args()
# Extract the basename.
_a = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f'Extracting PyTorch state dictionary from {args.path_to_checkpoint}' )
if args.path_to_checkpoint.endswith('.zip' ):
with zipfile.ZipFile(args.path_to_checkpoint , 'r' ) as checkpoint:
with checkpoint.open('release/mp_rank_00/model_optim_rng.pt' ) as pytorch_dict:
_a = torch.load(lowerCAmelCase__ , map_location='cpu' )
else:
_a = torch.load(args.path_to_checkpoint , map_location='cpu' )
_a = input_state_dict.get('args' , lowerCAmelCase__ )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
_a = 'gelu_fast'
elif ds_args.openai_gelu:
_a = 'gelu_new'
else:
_a = 'gelu'
else:
# in the very early days this used to be "gelu_new"
_a = 'gelu_new'
# Spell out all parameters in case the defaults change.
_a = GPTaConfig(
vocab_size=5_02_57 , n_positions=10_24 , n_embd=10_24 , n_layer=24 , n_head=16 , n_inner=40_96 , activation_function=lowerCAmelCase__ , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.0_2 , summary_type='cls_index' , summary_use_proj=lowerCAmelCase__ , summary_activation=lowerCAmelCase__ , summary_proj_to_labels=lowerCAmelCase__ , summary_first_dropout=0.1 , scale_attn_weights=lowerCAmelCase__ , use_cache=lowerCAmelCase__ , bos_token_id=5_02_56 , eos_token_id=5_02_56 , )
else:
_a = GPTaConfig.from_json_file(args.config_file )
_a = ['GPT2LMHeadModel']
# Convert.
print('Converting' )
_a = convert_megatron_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(lowerCAmelCase__ , lowerCAmelCase__ )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
_a = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
_a = 'gpt2'
elif tokenizer_type == "PretrainedFromHF":
_a = ds_args.tokenizer_name_or_path
else:
raise ValueError(f'Unrecognized tokenizer_type {tokenizer_type}' )
else:
_a = 'gpt2'
_a = AutoTokenizer.from_pretrained(lowerCAmelCase__ )
_a = type(lowerCAmelCase__ ).__name__
_a = tokenizer_class
# Store the config to file.
print('Saving config' )
config.save_pretrained(lowerCAmelCase__ )
# Save tokenizer based on args
print(f'Adding {tokenizer_class} tokenizer files' )
tokenizer.save_pretrained(lowerCAmelCase__ )
# Store the state_dict to file.
_a = os.path.join(lowerCAmelCase__ , 'pytorch_model.bin' )
print(f'Saving checkpoint to "{output_checkpoint_file}"' )
torch.save(lowerCAmelCase__ , lowerCAmelCase__ )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 532 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = ['''image_processor''', '''tokenizer''']
lowerCAmelCase_ = '''CLIPImageProcessor'''
lowerCAmelCase_ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : Optional[Any] , _A : List[Any]=None , _A : List[str]=None , **_A : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __A , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop('''feature_extractor''' )
__SCREAMING_SNAKE_CASE : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__A , __A )
def __call__( self : List[Any] , _A : List[Any]=None , _A : Dict=None , _A : Any=None , **_A : str ):
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer(__A , return_tensors=__A , **__A )
if images is not None:
__SCREAMING_SNAKE_CASE : List[Any] = self.image_processor(__A , return_tensors=__A , **__A )
if text is not None and images is not None:
__SCREAMING_SNAKE_CASE : Dict = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
def UpperCAmelCase__ ( self : Tuple , *_A : Optional[int] , **_A : int ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__A , **__A )
def UpperCAmelCase__ ( self : Union[str, Any] , *_A : List[str] , **_A : Dict ):
"""simple docstring"""
return self.tokenizer.decode(*__A , **__A )
@property
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = self.tokenizer.model_input_names
__SCREAMING_SNAKE_CASE : Any = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __A , )
return self.image_processor_class
@property
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __A , )
return self.image_processor
| 74 |
def __A ( _lowercase , _lowercase ):
'''simple docstring'''
_A = (boundary[1] - boundary[0]) / steps
_A = boundary[0]
_A = boundary[1]
_A = make_points(_lowercase , _lowercase , _lowercase )
_A = 0.0
y += (h / 2.0) * f(_lowercase )
for i in x_i:
# print(i)
y += h * f(_lowercase )
y += (h / 2.0) * f(_lowercase )
return y
def __A ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
_A = a + h
while x < (b - h):
yield x
_A = x + h
def __A ( _lowercase ): # enter your function here
'''simple docstring'''
_A = (x - 0) * (x - 0)
return y
def __A ( ):
'''simple docstring'''
_A = 0.0 # Lower bound of integration
_A = 1.0 # Upper bound of integration
_A = 10.0 # define number of steps or resolution
_A = [a, b] # define boundary of integration
_A = method_a(_lowercase , _lowercase )
print(f"""y = {y}""" )
if __name__ == "__main__":
main()
| 484 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
_a : List[str] = logging.get_logger(__name__)
_a : Union[str, Any] = {
"""Intel/dpt-large""": """https://huggingface.co/Intel/dpt-large/resolve/main/config.json""",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class _UpperCAmelCase ( _A ):
"""simple docstring"""
A = '''dpt'''
def __init__( self , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3_072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1e-12 , _lowerCAmelCase=384 , _lowerCAmelCase=16 , _lowerCAmelCase=3 , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=[2, 5, 8, 11] , _lowerCAmelCase="project" , _lowerCAmelCase=[4, 2, 1, 0.5] , _lowerCAmelCase=[96, 192, 384, 768] , _lowerCAmelCase=256 , _lowerCAmelCase=-1 , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=0.4 , _lowerCAmelCase=255 , _lowerCAmelCase=0.1 , _lowerCAmelCase=[1, 1_024, 24, 24] , _lowerCAmelCase=[0, 1] , _lowerCAmelCase=None , **_lowerCAmelCase , ):
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
lowerCAmelCase__ :Dict = hidden_size
lowerCAmelCase__ :Dict = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info("Initializing the config with a `BiT` backbone." )
lowerCAmelCase__ :int = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
}
lowerCAmelCase__ :Dict = BitConfig(**_lowerCAmelCase )
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
logger.info("Initializing the config with a `BiT` backbone." )
lowerCAmelCase__ :str = BitConfig(**_lowerCAmelCase )
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowerCAmelCase__ :int = backbone_config
else:
raise ValueError(
f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
lowerCAmelCase__ :Union[str, Any] = backbone_featmap_shape
lowerCAmelCase__ :Any = neck_ignore_stages
if readout_type != "project":
raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode." )
else:
lowerCAmelCase__ :Tuple = None
lowerCAmelCase__ :Dict = None
lowerCAmelCase__ :Dict = []
lowerCAmelCase__ :Union[str, Any] = num_hidden_layers
lowerCAmelCase__ :List[Any] = num_attention_heads
lowerCAmelCase__ :Optional[Any] = intermediate_size
lowerCAmelCase__ :Any = hidden_act
lowerCAmelCase__ :int = hidden_dropout_prob
lowerCAmelCase__ :Dict = attention_probs_dropout_prob
lowerCAmelCase__ :int = initializer_range
lowerCAmelCase__ :Union[str, Any] = layer_norm_eps
lowerCAmelCase__ :Dict = image_size
lowerCAmelCase__ :Tuple = patch_size
lowerCAmelCase__ :Optional[int] = num_channels
lowerCAmelCase__ :Any = qkv_bias
lowerCAmelCase__ :Optional[int] = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']" )
lowerCAmelCase__ :Any = readout_type
lowerCAmelCase__ :Any = reassemble_factors
lowerCAmelCase__ :List[Any] = neck_hidden_sizes
lowerCAmelCase__ :Dict = fusion_hidden_size
lowerCAmelCase__ :Optional[Any] = head_in_index
lowerCAmelCase__ :str = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
lowerCAmelCase__ :Dict = use_auxiliary_head
lowerCAmelCase__ :Union[str, Any] = auxiliary_loss_weight
lowerCAmelCase__ :Optional[int] = semantic_loss_ignore_index
lowerCAmelCase__ :Optional[Any] = semantic_classifier_dropout
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCAmelCase__ :str = self.backbone_config.to_dict()
lowerCAmelCase__ :str = self.__class__.model_type
return output
| 111 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( _A , unittest.TestCase ):
"""simple docstring"""
A = KandinskyVaaPipeline
A = [
'''image_embeds''',
'''negative_image_embeds''',
]
A = ['''image_embeds''', '''negative_image_embeds''']
A = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
A = False
@property
def snake_case_ ( self ):
'''simple docstring'''
return 32
@property
def snake_case_ ( self ):
'''simple docstring'''
return 32
@property
def snake_case_ ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def snake_case_ ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case_ ( self ):
'''simple docstring'''
return 100
@property
def snake_case_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ :List[str] = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
lowerCAmelCase__ :List[str] = UNetaDConditionModel(**_lowerCAmelCase )
return model
@property
def snake_case_ ( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def snake_case_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ :Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = self.dummy_unet
lowerCAmelCase__ :List[str] = self.dummy_movq
lowerCAmelCase__ :List[Any] = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule="linear" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=_lowerCAmelCase , set_alpha_to_one=_lowerCAmelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=_lowerCAmelCase , )
lowerCAmelCase__ :Optional[Any] = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
lowerCAmelCase__ :Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_lowerCAmelCase )
if str(_lowerCAmelCase ).startswith("mps" ):
lowerCAmelCase__ :List[Any] = torch.manual_seed(_lowerCAmelCase )
else:
lowerCAmelCase__ :Optional[int] = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
lowerCAmelCase__ :Optional[int] = {
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = "cpu"
lowerCAmelCase__ :Dict = self.get_dummy_components()
lowerCAmelCase__ :Tuple = self.pipeline_class(**_lowerCAmelCase )
lowerCAmelCase__ :List[Any] = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowerCAmelCase__ :str = pipe(**self.get_dummy_inputs(_lowerCAmelCase ) )
lowerCAmelCase__ :Dict = output.images
lowerCAmelCase__ :str = pipe(
**self.get_dummy_inputs(_lowerCAmelCase ) , return_dict=_lowerCAmelCase , )[0]
lowerCAmelCase__ :Tuple = image[0, -3:, -3:, -1]
lowerCAmelCase__ :List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ :Tuple = np.array(
[0.623_7976, 1.0, 0.3644_1332, 1.0, 0.7063_9634, 0.2987_7186, 0.8565_2125, 0.521_6843, 0.5445_4046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" )
lowerCAmelCase__ :str = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCAmelCase )
lowerCAmelCase__ :str = KandinskyVaaPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa )
lowerCAmelCase__ :Union[str, Any] = pipeline.to(_lowerCAmelCase )
pipeline.set_progress_bar_config(disable=_lowerCAmelCase )
lowerCAmelCase__ :Optional[int] = "red cat, 4k photo"
lowerCAmelCase__ :Tuple = torch.Generator(device="cuda" ).manual_seed(0 )
lowerCAmelCase__ ,lowerCAmelCase__ :Dict = pipe_prior(
_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
lowerCAmelCase__ :List[Any] = torch.Generator(device="cuda" ).manual_seed(0 )
lowerCAmelCase__ :List[str] = pipeline(
image_embeds=_lowerCAmelCase , negative_image_embeds=_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=100 , output_type="np" , )
lowerCAmelCase__ :int = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
| 111 | 1 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
snake_case__ : Optional[Any] = logging.getLogger(__name__)
@dataclass(frozen=UpperCAmelCase__ )
class _a :
"""simple docstring"""
A_ = 42
A_ = 42
A_ = None
A_ = None
A_ = None
@dataclass(frozen=UpperCAmelCase__ )
class _a :
"""simple docstring"""
A_ = 42
A_ = None
A_ = None
A_ = None
A_ = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = 42
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase=False , _UpperCAmelCase = False , ) -> Optional[Any]:
UpperCamelCase_ = hans_processors[task]()
UpperCamelCase_ = os.path.join(
_UpperCAmelCase , 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(_UpperCAmelCase ) , _UpperCAmelCase , ) , )
UpperCamelCase_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCamelCase_ , UpperCamelCase_ = label_list[2], label_list[1]
UpperCamelCase_ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCamelCase_ = cached_features_file + '.lock'
with FileLock(_UpperCAmelCase ):
if os.path.exists(_UpperCAmelCase ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
UpperCamelCase_ = torch.load(_UpperCAmelCase )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
UpperCamelCase_ = (
processor.get_dev_examples(_UpperCAmelCase ) if evaluate else processor.get_train_examples(_UpperCAmelCase )
)
logger.info('Training examples: %s' , len(_UpperCAmelCase ) )
UpperCamelCase_ = hans_convert_examples_to_features(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
logger.info('Saving features into cached file %s' , _UpperCAmelCase )
torch.save(self.features , _UpperCAmelCase )
def __len__( self ) -> int:
return len(self.features )
def __getitem__( self , _UpperCAmelCase ) -> InputFeatures:
return self.features[i]
def _UpperCAmelCase ( self ) -> List[Any]:
return self.label_list
if is_tf_available():
import tensorflow as tf
class _a :
"""simple docstring"""
A_ = 42
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 128 , _UpperCAmelCase=False , _UpperCAmelCase = False , ) -> Dict:
UpperCamelCase_ = hans_processors[task]()
UpperCamelCase_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCamelCase_ , UpperCamelCase_ = label_list[2], label_list[1]
UpperCamelCase_ = label_list
UpperCamelCase_ = processor.get_dev_examples(_UpperCAmelCase ) if evaluate else processor.get_train_examples(_UpperCAmelCase )
UpperCamelCase_ = hans_convert_examples_to_features(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 10000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(_UpperCAmelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
UpperCamelCase_ = tf.data.Dataset.from_generator(
_UpperCAmelCase , (
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) , (
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _UpperCAmelCase ( self ) -> List[Any]:
return self.dataset
def __len__( self ) -> str:
return len(self.features )
def __getitem__( self , _UpperCAmelCase ) -> InputFeatures:
return self.features[i]
def _UpperCAmelCase ( self ) -> List[str]:
return self.label_list
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Optional[Any]:
return self._create_examples(self._read_tsv(os.path.join(_UpperCAmelCase , 'heuristics_train_set.txt' ) ) , 'train' )
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> List[str]:
return self._create_examples(self._read_tsv(os.path.join(_UpperCAmelCase , 'heuristics_evaluation_set.txt' ) ) , 'dev' )
def _UpperCAmelCase ( self ) -> List[str]:
return ["contradiction", "entailment", "neutral"]
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
UpperCamelCase_ = []
for i, line in enumerate(_UpperCAmelCase ):
if i == 0:
continue
UpperCamelCase_ = '%s-%s' % (set_type, line[0])
UpperCamelCase_ = line[5]
UpperCamelCase_ = line[6]
UpperCamelCase_ = line[7][2:] if line[7].startswith('ex' ) else line[7]
UpperCamelCase_ = line[0]
examples.append(InputExample(guid=_UpperCAmelCase , text_a=_UpperCAmelCase , text_b=_UpperCAmelCase , label=_UpperCAmelCase , pairID=_UpperCAmelCase ) )
return examples
def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase , ):
UpperCamelCase_ = {label: i for i, label in enumerate(__lowercase)}
UpperCamelCase_ = []
for ex_index, example in tqdm.tqdm(enumerate(__lowercase) , desc='convert examples to features'):
if ex_index % 10000 == 0:
logger.info('Writing example %d' % (ex_index))
UpperCamelCase_ = tokenizer(
example.text_a , example.text_b , add_special_tokens=__lowercase , max_length=__lowercase , padding='max_length' , truncation=__lowercase , return_overflowing_tokens=__lowercase , )
UpperCamelCase_ = label_map[example.label] if example.label in label_map else 0
UpperCamelCase_ = int(example.pairID)
features.append(InputFeatures(**__lowercase , label=__lowercase , pairID=__lowercase))
for i, example in enumerate(examples[:5]):
logger.info('*** Example ***')
logger.info(f"""guid: {example}""")
logger.info(f"""features: {features[i]}""")
return features
snake_case__ : str = {
"""hans""": 3,
}
snake_case__ : Optional[Any] = {
"""hans""": HansProcessor,
}
| 23 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
snake_case__ : Dict = TypeVar("""T""")
class _a ( Generic[T] ):
"""simple docstring"""
A_ = 42 # Cache store of keys
A_ = 42 # References of the keys in cache
A_ = 10 # Maximum capacity of cache
def __init__( self , _UpperCAmelCase ) -> None:
UpperCamelCase_ = deque()
UpperCamelCase_ = set()
if not n:
UpperCamelCase_ = sys.maxsize
elif n < 0:
raise ValueError('n should be an integer greater than 0.' )
else:
UpperCamelCase_ = n
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> None:
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
UpperCamelCase_ = self.dq_store.pop()
self.key_reference.remove(_UpperCAmelCase )
else:
self.dq_store.remove(_UpperCAmelCase )
self.dq_store.appendleft(_UpperCAmelCase )
self.key_reference.add(_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> None:
for k in self.dq_store:
print(_UpperCAmelCase )
def __repr__( self ) -> str:
return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case__ : LRUCache[str | int] = LRUCache(4)
lru_cache.refer("""A""")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("""A""")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 23 | 1 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
'''simple docstring'''
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
lowerCamelCase_ : List[str] = cst_fwd.get(lowerCAmelCase_ , np.inf)
lowerCamelCase_ : Dict = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt))
lowerCamelCase_ : Optional[int] = new_cost_f
lowerCamelCase_ : List[str] = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
lowerCamelCase_ : Tuple = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = -1
lowerCamelCase_ : Tuple = set()
lowerCamelCase_ : Dict = set()
lowerCamelCase_ : int = {source: 0}
lowerCamelCase_ : str = {destination: 0}
lowerCamelCase_ : Tuple = {source: None}
lowerCamelCase_ : Dict = {destination: None}
lowerCamelCase_ : PriorityQueue[Any] = PriorityQueue()
lowerCamelCase_ : PriorityQueue[Any] = PriorityQueue()
lowerCamelCase_ : List[str] = np.inf
queue_forward.put((0, source))
queue_backward.put((0, destination))
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
lowerCamelCase_ ,lowerCamelCase_ : List[Any] = queue_forward.get()
visited_forward.add(lowerCAmelCase_)
lowerCamelCase_ ,lowerCamelCase_ : str = queue_backward.get()
visited_backward.add(lowerCAmelCase_)
lowerCamelCase_ : Any = pass_and_relaxation(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )
lowerCamelCase_ : Dict = pass_and_relaxation(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
lowerCamelCase_ : Union[str, Any] = shortest_distance
return shortest_path_distance
__magic_name__ = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
__magic_name__ = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , a_ , a_=7 , a_=3 , a_=18 , a_=30 , a_=400 , a_=True , a_=None , a_=True , ):
lowerCamelCase_ : int = size if size is not None else {"height": 18, "width": 18}
lowerCamelCase_ : str = parent
lowerCamelCase_ : str = batch_size
lowerCamelCase_ : Tuple = num_channels
lowerCamelCase_ : Optional[int] = image_size
lowerCamelCase_ : List[str] = min_resolution
lowerCamelCase_ : Tuple = max_resolution
lowerCamelCase_ : Tuple = do_resize
lowerCamelCase_ : Dict = size
lowerCamelCase_ : List[str] = apply_ocr
def _UpperCamelCase ( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase__ ( __lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[str] = LayoutLMvaImageProcessingTester(self )
@property
def _UpperCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , "do_resize" ) )
self.assertTrue(hasattr(a_ , "size" ) )
self.assertTrue(hasattr(a_ , "apply_ocr" ) )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
lowerCamelCase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
# Initialize image_processing
lowerCamelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
lowerCamelCase_ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , a_ )
self.assertIsInstance(encoding.boxes , a_ )
# Test batched
lowerCamelCase_ : int = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _UpperCamelCase ( self ):
# Initialize image_processing
lowerCamelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
# Test not batched input
lowerCamelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
lowerCamelCase_ : Any = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _UpperCamelCase ( self ):
# Initialize image_processing
lowerCamelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
lowerCamelCase_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
lowerCamelCase_ : Union[str, Any] = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _UpperCamelCase ( self ):
# with apply_OCR = True
lowerCamelCase_ : Any = LayoutLMvaImageProcessor()
from datasets import load_dataset
lowerCamelCase_ : Optional[Any] = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
lowerCamelCase_ : Optional[Any] = Image.open(ds[0]["file"] ).convert("RGB" )
lowerCamelCase_ : List[Any] = image_processing(a_ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowerCamelCase_ : List[Any] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
lowerCamelCase_ : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , a_ )
self.assertListEqual(encoding.boxes , a_ )
# with apply_OCR = False
lowerCamelCase_ : List[str] = LayoutLMvaImageProcessor(apply_ocr=a_ )
lowerCamelCase_ : List[str] = image_processing(a_ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 73 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_lowerCAmelCase : int = logging.get_logger(__name__)
_lowerCAmelCase : Dict = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class A_ ( lowerCAmelCase__ ):
lowerCAmelCase__ = 'blenderbot-small'
lowerCAmelCase__ = ['past_key_values']
lowerCAmelCase__ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self: int ,__lowerCAmelCase: List[str]=50_265 ,__lowerCAmelCase: List[str]=512 ,__lowerCAmelCase: int=8 ,__lowerCAmelCase: Tuple=2_048 ,__lowerCAmelCase: Dict=16 ,__lowerCAmelCase: Tuple=8 ,__lowerCAmelCase: List[Any]=2_048 ,__lowerCAmelCase: Tuple=16 ,__lowerCAmelCase: Dict=0.0 ,__lowerCAmelCase: List[str]=0.0 ,__lowerCAmelCase: Any=True ,__lowerCAmelCase: List[Any]=True ,__lowerCAmelCase: int="gelu" ,__lowerCAmelCase: Any=512 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Optional[int]=0.0 ,__lowerCAmelCase: Any=0.0 ,__lowerCAmelCase: str=0.02 ,__lowerCAmelCase: Optional[Any]=1 ,__lowerCAmelCase: List[str]=False ,__lowerCAmelCase: Optional[Any]=0 ,__lowerCAmelCase: int=1 ,__lowerCAmelCase: Union[str, Any]=2 ,__lowerCAmelCase: int=2 ,**__lowerCAmelCase: str ,):
'''simple docstring'''
_lowerCamelCase : Dict = vocab_size
_lowerCamelCase : Union[str, Any] = max_position_embeddings
_lowerCamelCase : Tuple = d_model
_lowerCamelCase : List[str] = encoder_ffn_dim
_lowerCamelCase : Optional[int] = encoder_layers
_lowerCamelCase : Union[str, Any] = encoder_attention_heads
_lowerCamelCase : Union[str, Any] = decoder_ffn_dim
_lowerCamelCase : Dict = decoder_layers
_lowerCamelCase : Optional[int] = decoder_attention_heads
_lowerCamelCase : List[Any] = dropout
_lowerCamelCase : List[Any] = attention_dropout
_lowerCamelCase : Tuple = activation_dropout
_lowerCamelCase : Any = activation_function
_lowerCamelCase : List[str] = init_std
_lowerCamelCase : List[Any] = encoder_layerdrop
_lowerCamelCase : str = decoder_layerdrop
_lowerCamelCase : List[Any] = use_cache
_lowerCamelCase : Optional[Any] = encoder_layers
_lowerCamelCase : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_lowerCamelCase ,bos_token_id=_lowerCamelCase ,eos_token_id=_lowerCamelCase ,is_encoder_decoder=_lowerCamelCase ,decoder_start_token_id=_lowerCamelCase ,forced_eos_token_id=_lowerCamelCase ,**_lowerCamelCase ,)
class A_ ( lowerCAmelCase__ ):
@property
def _lowercase ( self: Any ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
_lowerCamelCase : Optional[int] = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
_lowerCamelCase : Any = {0: """batch"""}
_lowerCamelCase : List[Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
_lowerCamelCase : List[Any] = {0: """batch""", 1: """decoder_sequence"""}
_lowerCamelCase : Tuple = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(_lowerCamelCase ,direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
_lowerCamelCase : Tuple = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
_lowerCamelCase : str = self.num_layers
for i in range(_lowerCamelCase ):
_lowerCamelCase : List[str] = {0: """batch""", 2: """past_sequence + sequence"""}
_lowerCamelCase : Union[str, Any] = {0: """batch""", 2: """past_sequence + sequence"""}
else:
_lowerCamelCase : List[str] = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def _lowercase ( self: Dict ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
_lowerCamelCase : Optional[Any] = super().outputs
else:
_lowerCamelCase : Any = super(_lowerCamelCase ,self ).outputs
if self.use_past:
_lowerCamelCase : Dict = self.num_layers
for i in range(_lowerCamelCase ):
_lowerCamelCase : Dict = {0: """batch""", 2: """past_sequence + sequence"""}
_lowerCamelCase : Optional[Any] = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: int ,__lowerCAmelCase: Optional[int] = -1 ,__lowerCAmelCase: List[Any] = -1 ,__lowerCAmelCase: str = False ,__lowerCAmelCase: Dict = None ,):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# Generate decoder inputs
_lowerCamelCase : Optional[int] = seq_length if not self.use_past else 1
_lowerCamelCase : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
_lowerCamelCase : int = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
_lowerCamelCase : List[str] = dict(**_lowerCamelCase ,**_lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_lowerCamelCase : List[str] = common_inputs["""input_ids"""].shape
_lowerCamelCase : int = common_inputs["""decoder_input_ids"""].shape[1]
_lowerCamelCase : List[Any] = self.num_attention_heads
_lowerCamelCase : Optional[Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCamelCase : str = decoder_seq_length + 3
_lowerCamelCase : Union[str, Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowerCamelCase : List[str] = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(_lowerCamelCase ,_lowerCamelCase )] ,dim=1 )
_lowerCamelCase : Any = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_lowerCamelCase : str = self.num_layers
_lowerCamelCase : List[Any] = min(_lowerCamelCase ,_lowerCamelCase )
_lowerCamelCase : Union[str, Any] = max(_lowerCamelCase ,_lowerCamelCase ) - min_num_layers
_lowerCamelCase : List[Any] = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(_lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowerCamelCase ),
torch.zeros(_lowerCamelCase ),
torch.zeros(_lowerCamelCase ),
torch.zeros(_lowerCamelCase ),
) )
# TODO: test this.
_lowerCamelCase : Optional[int] = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(_lowerCamelCase ,_lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) )
return common_inputs
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Any = -1 ,__lowerCAmelCase: int = -1 ,__lowerCAmelCase: List[str] = False ,__lowerCAmelCase: int = None ,):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_lowerCamelCase : List[Any] = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
_lowerCamelCase : Optional[int] = seqlen + 2
_lowerCamelCase : List[Any] = self.num_layers
_lowerCamelCase : int = self.num_attention_heads
_lowerCamelCase : List[str] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCamelCase : Optional[int] = common_inputs["""attention_mask"""].dtype
_lowerCamelCase : Tuple = torch.cat(
[common_inputs["attention_mask"], torch.ones(_lowerCamelCase ,_lowerCamelCase ,dtype=_lowerCamelCase )] ,dim=1 )
_lowerCamelCase : Optional[int] = [
(torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) for _ in range(_lowerCamelCase )
]
return common_inputs
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Union[str, Any] = -1 ,__lowerCAmelCase: str = -1 ,__lowerCAmelCase: Dict = False ,__lowerCAmelCase: Tuple = None ,):
'''simple docstring'''
_lowerCamelCase : Tuple = compute_effective_axis_dimension(
_lowerCamelCase ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowerCamelCase : Optional[Any] = tokenizer.num_special_tokens_to_add(_lowerCamelCase )
_lowerCamelCase : Any = compute_effective_axis_dimension(
_lowerCamelCase ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=_lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
_lowerCamelCase : List[str] = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowerCamelCase : Optional[int] = dict(tokenizer(_lowerCamelCase ,return_tensors=_lowerCamelCase ) )
return common_inputs
def _lowercase ( self: List[str] ,__lowerCAmelCase: Any ,__lowerCAmelCase: List[str] = -1 ,__lowerCAmelCase: Tuple = -1 ,__lowerCAmelCase: List[str] = False ,__lowerCAmelCase: Optional[Any] = None ,):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
_lowerCamelCase : List[str] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowerCamelCase ,batch_size=_lowerCamelCase ,seq_length=_lowerCamelCase ,is_pair=_lowerCamelCase ,framework=_lowerCamelCase )
elif self.task == "causal-lm":
_lowerCamelCase : Any = self._generate_dummy_inputs_for_causal_lm(
_lowerCamelCase ,batch_size=_lowerCamelCase ,seq_length=_lowerCamelCase ,is_pair=_lowerCamelCase ,framework=_lowerCamelCase )
else:
_lowerCamelCase : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase ,batch_size=_lowerCamelCase ,seq_length=_lowerCamelCase ,is_pair=_lowerCamelCase ,framework=_lowerCamelCase )
return common_inputs
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Any ,__lowerCAmelCase: Any ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[Any] ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
_lowerCamelCase : Union[str, Any] = super()._flatten_past_key_values_(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
else:
_lowerCamelCase : Dict = super(_lowerCamelCase ,self )._flatten_past_key_values_(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) | 46 |
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
_A = logging.get_logger(__name__)
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ) -> Optional[Any]:
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
if not is_sharded:
UpperCAmelCase__ : Tuple = os.path.abspath(lowerCAmelCase )
logger.info(F"""Loading PyTorch weights from {pt_path}""" )
UpperCAmelCase__ : Any = torch.load(lowerCAmelCase , map_location="""cpu""" )
logger.info(F"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )
UpperCAmelCase__ : str = convert_pytorch_state_dict_to_flax(lowerCAmelCase , lowerCAmelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
UpperCAmelCase__ : Optional[Any] = convert_pytorch_sharded_state_dict_to_flax(lowerCAmelCase , lowerCAmelCase )
return flax_state_dict
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) -> (Tuple[str], np.ndarray):
def is_key_or_prefix_key_in_dict(lowerCAmelCase ) -> bool:
return len(set(lowerCAmelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
UpperCAmelCase__ : Any = pt_tuple_key[:-1] + ("""scale""",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
UpperCAmelCase__ : Tuple = pt_tuple_key[:-1] + ("""mean""",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
UpperCAmelCase__ : Any = pt_tuple_key[:-1] + ("""var""",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
UpperCAmelCase__ : Tuple = pt_tuple_key[:-1] + ("""embedding""",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCAmelCase__ : List[str] = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(lowerCAmelCase ):
UpperCAmelCase__ : Union[str, Any] = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCAmelCase__ : int = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(lowerCAmelCase ):
UpperCAmelCase__ : str = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCAmelCase__ : Tuple = pt_tuple_key[:-1] + ("""weight""",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCAmelCase__ : List[str] = pt_tuple_key[:-1] + ("""bias""",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
UpperCAmelCase__ : Optional[int] = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
UpperCAmelCase__ : int = pt_tuple_key[-2] + """_g"""
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
UpperCAmelCase__ : Optional[int] = pt_tuple_key[-2] + """_v"""
if name is not None:
UpperCAmelCase__ : List[str] = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> Dict:
# convert pytorch tensor to numpy
UpperCAmelCase__ : Optional[int] = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCAmelCase__ : Optional[Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
UpperCAmelCase__ : Any = flax_model.params["""params"""]
else:
UpperCAmelCase__ : Optional[Any] = flax_model.params
UpperCAmelCase__ : Tuple = flatten_dict(lowerCAmelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCAmelCase__ : Dict = flatten_dict(flax_model.params["""batch_stats"""] )
random_flax_state_dict.update(lowerCAmelCase )
UpperCAmelCase__ : List[str] = {}
UpperCAmelCase__ : str = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
UpperCAmelCase__ : List[str] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCAmelCase__ : Optional[Any] = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
UpperCAmelCase__ : str = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCAmelCase__ : Optional[int] = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCAmelCase__ , UpperCAmelCase__ : int = rename_key_and_reshape_tensor(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# add model prefix if necessary
UpperCAmelCase__ : str = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCAmelCase__ : str = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
UpperCAmelCase__ : Any = jnp.asarray(lowerCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(lowerCAmelCase , lowerCAmelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCAmelCase__ : Union[str, Any] = jnp.asarray(lowerCAmelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCAmelCase__ : Any = jnp.asarray(lowerCAmelCase )
return unflatten_dict(lowerCAmelCase )
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> str:
import torch
# Load the index
UpperCAmelCase__ : List[str] = {}
for shard_file in shard_filenames:
# load using msgpack utils
UpperCAmelCase__ : Tuple = torch.load(lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCAmelCase__ : Optional[Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCAmelCase__ : List[Any] = flax_model.params["""params"""]
UpperCAmelCase__ : int = flatten_dict(lowerCAmelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params["""batch_stats"""] ) )
else:
UpperCAmelCase__ : Any = flax_model.params
UpperCAmelCase__ : List[Any] = flatten_dict(lowerCAmelCase )
UpperCAmelCase__ : int = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
UpperCAmelCase__ : str = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCAmelCase__ : Optional[int] = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
UpperCAmelCase__ : Dict = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCAmelCase__ : Optional[int] = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = rename_key_and_reshape_tensor(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# add model prefix if necessary
UpperCAmelCase__ : Tuple = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCAmelCase__ : Optional[int] = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
UpperCAmelCase__ : Any = jnp.asarray(lowerCAmelCase )
continue
if "var" in flax_key[-1]:
UpperCAmelCase__ : List[str] = jnp.asarray(lowerCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(lowerCAmelCase , lowerCAmelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCAmelCase__ : Optional[int] = jnp.asarray(lowerCAmelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCAmelCase__ : List[str] = jnp.asarray(lowerCAmelCase )
return unflatten_dict(lowerCAmelCase )
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = os.path.abspath(lowerCAmelCase )
logger.info(F"""Loading Flax weights from {flax_checkpoint_path}""" )
# import correct flax class
UpperCAmelCase__ : Dict = getattr(lowerCAmelCase , """Flax""" + model.__class__.__name__ )
# load flax weight dict
with open(lowerCAmelCase , """rb""" ) as state_f:
try:
UpperCAmelCase__ : Any = from_bytes(lowerCAmelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(lowerCAmelCase , lowerCAmelCase )
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> Optional[Any]:
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
UpperCAmelCase__ : List[Any] = flatten_dict(jax.tree_util.tree_map(lambda lowerCAmelCase : x.dtype == jnp.bfloataa , lowerCAmelCase ) ).values()
if any(lowerCAmelCase ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
UpperCAmelCase__ : Union[str, Any] = jax.tree_util.tree_map(
lambda lowerCAmelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , lowerCAmelCase )
UpperCAmelCase__ : int = flatten_dict(lowerCAmelCase )
UpperCAmelCase__ : str = pt_model.state_dict()
UpperCAmelCase__ : Optional[int] = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
UpperCAmelCase__ : str = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
UpperCAmelCase__ : List[str] = []
UpperCAmelCase__ : Optional[int] = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCAmelCase__ : Dict = flax_key_tuple[0] == pt_model.base_model_prefix
UpperCAmelCase__ : List[str] = """.""".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCAmelCase__ : int = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
UpperCAmelCase__ : List[Any] = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(lowerCAmelCase ) not in pt_model_dict:
# conv layer
UpperCAmelCase__ : Union[str, Any] = flax_key_tuple[:-1] + ("""weight""",)
UpperCAmelCase__ : str = jnp.transpose(lowerCAmelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(lowerCAmelCase ) not in pt_model_dict:
# linear layer
UpperCAmelCase__ : Optional[Any] = flax_key_tuple[:-1] + ("""weight""",)
UpperCAmelCase__ : Union[str, Any] = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCAmelCase__ : Any = flax_key_tuple[:-1] + ("""weight""",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
UpperCAmelCase__ : Any = flax_key_tuple[:-1] + ("""running_mean""",)
elif "var" in flax_key_tuple[-1]:
UpperCAmelCase__ : Dict = flax_key_tuple[:-1] + ("""running_var""",)
if "batch_stats" in flax_state:
UpperCAmelCase__ : int = """.""".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
UpperCAmelCase__ : Optional[Any] = """.""".join(lowerCAmelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
UpperCAmelCase__ : Optional[int] = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
UpperCAmelCase__ : str = key.split(""".""" )
UpperCAmelCase__ : Optional[int] = None
if key_components[-3::2] == ["parametrizations", "original0"]:
UpperCAmelCase__ : Dict = key_components[-2] + """_g"""
elif key_components[-3::2] == ["parametrizations", "original1"]:
UpperCAmelCase__ : Dict = key_components[-2] + """_v"""
if name is not None:
UpperCAmelCase__ : Optional[Any] = key_components[:-3] + [name]
UpperCAmelCase__ : Any = """.""".join(lowerCAmelCase )
UpperCAmelCase__ : List[str] = key
if flax_key in special_pt_names:
UpperCAmelCase__ : Optional[Any] = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
F"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
UpperCAmelCase__ : Dict = np.asarray(lowerCAmelCase ) if not isinstance(lowerCAmelCase , np.ndarray ) else flax_tensor
UpperCAmelCase__ : Optional[int] = torch.from_numpy(lowerCAmelCase )
# remove from missing keys
missing_keys.remove(lowerCAmelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(lowerCAmelCase )
pt_model.load_state_dict(lowerCAmelCase )
# re-transform missing_keys to list
UpperCAmelCase__ : Optional[Any] = list(lowerCAmelCase )
if len(lowerCAmelCase ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
F""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
F""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
F""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
else:
logger.warning(F"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" )
if len(lowerCAmelCase ) > 0:
logger.warning(
F"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
F""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
""" use it for predictions and inference.""" )
else:
logger.warning(
F"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"""
"""If your task is similar to the task the model of the checkpoint was trained on, """
F"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" )
return pt_model
| 182 | 0 |
'''simple docstring'''
def _snake_case ( A ) -> bool:
if p < 2:
raise ValueError('''p should not be less than 2!''' )
elif p == 2:
return True
lowerCAmelCase__ = 4
lowerCAmelCase__ = (1 << p) - 1
for _ in range(p - 2 ):
lowerCAmelCase__ = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11)) | 98 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
__UpperCAmelCase = '''0.12''' # assumed parallelism: 8
@require_flax
@is_staging_test
class a__ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __SCREAMING_SNAKE_CASE ( cls ) -> Any:
lowerCAmelCase__ = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls ) -> Tuple:
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
lowerCAmelCase__ = FlaxBertModel(lowerCamelCase_ )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
lowerCAmelCase__ = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" )
lowerCAmelCase__ = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase_ , 1e-3 , msg=F"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCamelCase_ , repo_id='''test-model-flax''' , push_to_hub=lowerCamelCase_ , use_auth_token=self._token )
lowerCAmelCase__ = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" )
lowerCAmelCase__ = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase_ , 1e-3 , msg=F"""{key} not identical""" )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
lowerCAmelCase__ = FlaxBertModel(lowerCamelCase_ )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
lowerCAmelCase__ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
lowerCAmelCase__ = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase_ , 1e-3 , msg=F"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
lowerCamelCase_ , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=lowerCamelCase_ , use_auth_token=self._token )
lowerCAmelCase__ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
lowerCAmelCase__ = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase_ , 1e-3 , msg=F"""{key} not identical""" )
def _snake_case ( A , A ) -> Optional[int]:
lowerCAmelCase__ = True
lowerCAmelCase__ = flatten_dict(modela.params )
lowerCAmelCase__ = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4:
lowerCAmelCase__ = False
return models_are_equal
@require_flax
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
lowerCAmelCase__ = FlaxBertModel(lowerCamelCase_ )
lowerCAmelCase__ = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )
with self.assertRaises(lowerCamelCase_ ):
lowerCAmelCase__ = FlaxBertModel.from_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = FlaxBertModel.from_pretrained(lowerCamelCase_ , subfolder=lowerCamelCase_ )
self.assertTrue(check_models_equal(lowerCamelCase_ , lowerCamelCase_ ) )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
lowerCAmelCase__ = FlaxBertModel(lowerCamelCase_ )
lowerCAmelCase__ = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , max_shard_size='''10KB''' )
with self.assertRaises(lowerCamelCase_ ):
lowerCAmelCase__ = FlaxBertModel.from_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = FlaxBertModel.from_pretrained(lowerCamelCase_ , subfolder=lowerCamelCase_ )
self.assertTrue(check_models_equal(lowerCamelCase_ , lowerCamelCase_ ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = '''bert'''
lowerCAmelCase__ = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(lowerCamelCase_ ):
lowerCAmelCase__ = FlaxBertModel.from_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = FlaxBertModel.from_pretrained(lowerCamelCase_ , subfolder=lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = '''bert'''
lowerCAmelCase__ = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(lowerCamelCase_ ):
lowerCAmelCase__ = FlaxBertModel.from_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = FlaxBertModel.from_pretrained(lowerCamelCase_ , subfolder=lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ ) | 98 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : Optional[Any] = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
'tokenizer_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : Tuple = {
'google/rembert': 256,
}
UpperCAmelCase_ : Optional[int] = '▁'
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : List[str] = VOCAB_FILES_NAMES
snake_case__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : List[str] = RemBertTokenizer
def __init__( self : int , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : int=False , SCREAMING_SNAKE_CASE__ : Optional[Any]="[CLS]" , SCREAMING_SNAKE_CASE__ : List[Any]="[SEP]" , SCREAMING_SNAKE_CASE__ : Optional[Any]="<unk>" , SCREAMING_SNAKE_CASE__ : List[str]="[SEP]" , SCREAMING_SNAKE_CASE__ : Any="<pad>" , SCREAMING_SNAKE_CASE__ : Tuple="[CLS]" , SCREAMING_SNAKE_CASE__ : Any="[MASK]" , **SCREAMING_SNAKE_CASE__ : Optional[int] , ) -> List[str]:
# Mask token behave like a normal word, i.e. include the space before it
a_ : List[Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
super().__init__(
SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , remove_space=SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
a_ : Any = do_lower_case
a_ : List[str] = remove_space
a_ : Union[str, Any] = keep_accents
a_ : str = vocab_file
a_ : Optional[Any] = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]:
a_ : List[Any] = [self.sep_token_id]
a_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]:
a_ : Union[str, Any] = [self.sep_token_id]
a_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error('Vocabulary path ({}) should be a directory'.format(SCREAMING_SNAKE_CASE__ ) )
return
a_ : int = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
| 570 |
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase__ ):
snake_case__ : List[str] = ['''onnx''']
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Dict ) -> Any:
requires_backends(self , ['onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[Any] , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]:
requires_backends(cls , ['onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : str , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['onnx'] )
| 570 | 1 |
'''simple docstring'''
import math
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> str:
UpperCAmelCase_ : List[str] = 0
UpperCAmelCase_ : str = 0
while num > 0:
UpperCAmelCase_ : Union[str, Any] = num % 8
UpperCAmelCase_ : List[Any] = octal + (remainder * math.floor(math.pow(1_0 ,UpperCamelCase ) ))
counter += 1
UpperCAmelCase_ : Dict = math.floor(num / 8 ) # basically /= 8 without remainder if any
# This formatting removes trailing '.0' from `octal`.
return f"""0o{int(UpperCamelCase )}"""
def SCREAMING_SNAKE_CASE( ) -> None:
print('\n2 in octal is:' )
print(decimal_to_octal(2 ) ) # = 2
print('\n8 in octal is:' )
print(decimal_to_octal(8 ) ) # = 10
print('\n65 in octal is:' )
print(decimal_to_octal(6_5 ) ) # = 101
print('\n216 in octal is:' )
print(decimal_to_octal(2_1_6 ) ) # = 330
print('\n512 in octal is:' )
print(decimal_to_octal(5_1_2 ) ) # = 1000
print('\n' )
if __name__ == "__main__":
main()
| 471 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class lowercase ( a_ ):
_lowerCamelCase : torch.FloatTensor
class lowercase ( a_, a_ ):
@register_to_config
def __init__( self , _snake_case = 6_5536 , _snake_case = None , _snake_case = 2 , _snake_case = 2 , _snake_case = 0 , _snake_case = "fourier" , _snake_case = True , _snake_case = False , _snake_case = 0.0 , _snake_case = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , _snake_case = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , _snake_case = "UNetMidBlock1D" , _snake_case = None , _snake_case = (32, 32, 64) , _snake_case = None , _snake_case = 8 , _snake_case = 1 , _snake_case = False , ) -> List[str]:
super().__init__()
UpperCAmelCase_ : Optional[Any] = sample_size
# time
if time_embedding_type == "fourier":
UpperCAmelCase_ : Tuple = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=_snake_case , log=_snake_case , flip_sin_to_cos=_snake_case)
UpperCAmelCase_ : int = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
UpperCAmelCase_ : Optional[Any] = Timesteps(
block_out_channels[0] , flip_sin_to_cos=_snake_case , downscale_freq_shift=_snake_case)
UpperCAmelCase_ : List[Any] = block_out_channels[0]
if use_timestep_embedding:
UpperCAmelCase_ : Dict = block_out_channels[0] * 4
UpperCAmelCase_ : List[Any] = TimestepEmbedding(
in_channels=_snake_case , time_embed_dim=_snake_case , act_fn=_snake_case , out_dim=block_out_channels[0] , )
UpperCAmelCase_ : int = nn.ModuleList([])
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Optional[int] = nn.ModuleList([])
UpperCAmelCase_ : Any = None
# down
UpperCAmelCase_ : Dict = in_channels
for i, down_block_type in enumerate(_snake_case):
UpperCAmelCase_ : int = output_channel
UpperCAmelCase_ : Optional[int] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
UpperCAmelCase_ : int = i == len(_snake_case) - 1
UpperCAmelCase_ : Any = get_down_block(
_snake_case , num_layers=_snake_case , in_channels=_snake_case , out_channels=_snake_case , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(_snake_case)
# mid
UpperCAmelCase_ : Optional[int] = get_mid_block(
_snake_case , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=_snake_case , add_downsample=_snake_case , )
# up
UpperCAmelCase_ : Union[str, Any] = list(reversed(_snake_case))
UpperCAmelCase_ : Optional[int] = reversed_block_out_channels[0]
if out_block_type is None:
UpperCAmelCase_ : Tuple = out_channels
else:
UpperCAmelCase_ : int = block_out_channels[0]
for i, up_block_type in enumerate(_snake_case):
UpperCAmelCase_ : Dict = output_channel
UpperCAmelCase_ : Optional[Any] = (
reversed_block_out_channels[i + 1] if i < len(_snake_case) - 1 else final_upsample_channels
)
UpperCAmelCase_ : str = i == len(_snake_case) - 1
UpperCAmelCase_ : Union[str, Any] = get_up_block(
_snake_case , num_layers=_snake_case , in_channels=_snake_case , out_channels=_snake_case , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(_snake_case)
UpperCAmelCase_ : Dict = output_channel
# out
UpperCAmelCase_ : Union[str, Any] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32)
UpperCAmelCase_ : Any = get_out_block(
out_block_type=_snake_case , num_groups_out=_snake_case , embed_dim=block_out_channels[0] , out_channels=_snake_case , act_fn=_snake_case , fc_dim=block_out_channels[-1] // 4 , )
def _snake_case ( self , _snake_case , _snake_case , _snake_case = True , ) -> Union[UNetaDOutput, Tuple]:
UpperCAmelCase_ : Union[str, Any] = timestep
if not torch.is_tensor(_snake_case):
UpperCAmelCase_ : Union[str, Any] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device)
elif torch.is_tensor(_snake_case) and len(timesteps.shape) == 0:
UpperCAmelCase_ : Tuple = timesteps[None].to(sample.device)
UpperCAmelCase_ : Any = self.time_proj(_snake_case)
if self.config.use_timestep_embedding:
UpperCAmelCase_ : int = self.time_mlp(_snake_case)
else:
UpperCAmelCase_ : int = timestep_embed[..., None]
UpperCAmelCase_ : List[Any] = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
UpperCAmelCase_ : int = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))
# 2. down
UpperCAmelCase_ : Optional[Any] = ()
for downsample_block in self.down_blocks:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = downsample_block(hidden_states=_snake_case , temb=_snake_case)
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
UpperCAmelCase_ : List[Any] = self.mid_block(_snake_case , _snake_case)
# 4. up
for i, upsample_block in enumerate(self.up_blocks):
UpperCAmelCase_ : int = down_block_res_samples[-1:]
UpperCAmelCase_ : Tuple = down_block_res_samples[:-1]
UpperCAmelCase_ : List[Any] = upsample_block(_snake_case , res_hidden_states_tuple=_snake_case , temb=_snake_case)
# 5. post-process
if self.out_block:
UpperCAmelCase_ : Optional[Any] = self.out_block(_snake_case , _snake_case)
if not return_dict:
return (sample,)
return UNetaDOutput(sample=_snake_case)
| 471 | 1 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def a__ ( lowercase : int ) -> int:
"""simple docstring"""
_UpperCamelCase = 384
_UpperCamelCase = 7
if "tiny" in model_name:
_UpperCamelCase = 96
_UpperCamelCase = (2, 2, 6, 2)
_UpperCamelCase = (3, 6, 12, 24)
elif "small" in model_name:
_UpperCamelCase = 96
_UpperCamelCase = (2, 2, 18, 2)
_UpperCamelCase = (3, 6, 12, 24)
elif "base" in model_name:
_UpperCamelCase = 128
_UpperCamelCase = (2, 2, 18, 2)
_UpperCamelCase = (4, 8, 16, 32)
_UpperCamelCase = 12
_UpperCamelCase = 512
elif "large" in model_name:
_UpperCamelCase = 192
_UpperCamelCase = (2, 2, 18, 2)
_UpperCamelCase = (6, 12, 24, 48)
_UpperCamelCase = 12
_UpperCamelCase = 768
# set label information
_UpperCamelCase = 150
_UpperCamelCase = '''huggingface/label-files'''
_UpperCamelCase = '''ade20k-id2label.json'''
_UpperCamelCase = json.load(open(hf_hub_download(lowercase_, lowercase_, repo_type='''dataset''' ), '''r''' ) )
_UpperCamelCase = {int(lowercase_ ): v for k, v in idalabel.items()}
_UpperCamelCase = {v: k for k, v in idalabel.items()}
_UpperCamelCase = SwinConfig(
embed_dim=lowercase_, depths=lowercase_, num_heads=lowercase_, window_size=lowercase_, out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''], )
_UpperCamelCase = UperNetConfig(
backbone_config=lowercase_, auxiliary_in_channels=lowercase_, num_labels=lowercase_, idalabel=lowercase_, labelaid=lowercase_, )
return config
def a__ ( lowercase : Union[str, Any] ) -> Dict:
"""simple docstring"""
_UpperCamelCase = []
# fmt: off
# stem
rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.stages.{i}.downsample.reduction.weight""", F"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.stages.{i}.downsample.norm.weight""", F"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.stages.{i}.downsample.norm.bias""", F"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def a__ ( lowercase : Optional[Any], lowercase : List[str], lowercase : Dict ) -> int:
"""simple docstring"""
_UpperCamelCase = dct.pop(lowercase_ )
_UpperCamelCase = val
def a__ ( lowercase : Tuple, lowercase : List[Any] ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_UpperCamelCase = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_UpperCamelCase = state_dict.pop(F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight""" )
_UpperCamelCase = state_dict.pop(F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_UpperCamelCase = in_proj_weight[:dim, :]
_UpperCamelCase = in_proj_bias[: dim]
_UpperCamelCase = in_proj_weight[
dim : dim * 2, :
]
_UpperCamelCase = in_proj_bias[
dim : dim * 2
]
_UpperCamelCase = in_proj_weight[
-dim :, :
]
_UpperCamelCase = in_proj_bias[-dim :]
# fmt: on
def a__ ( lowercase : int ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = x.shape
_UpperCamelCase = x.reshape(lowercase_, 4, in_channel // 4 )
_UpperCamelCase = x[:, [0, 2, 1, 3], :].transpose(1, 2 ).reshape(lowercase_, lowercase_ )
return x
def a__ ( lowercase : Tuple ) -> int:
"""simple docstring"""
_UpperCamelCase = x.shape
_UpperCamelCase = x.reshape(lowercase_, in_channel // 4, 4 )
_UpperCamelCase = x[:, :, [0, 2, 1, 3]].transpose(1, 2 ).reshape(lowercase_, lowercase_ )
return x
def a__ ( lowercase : Optional[Any] ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = x.shape[0]
_UpperCamelCase = x.reshape(4, in_channel // 4 )
_UpperCamelCase = x[[0, 2, 1, 3], :].transpose(0, 1 ).reshape(lowercase_ )
return x
def a__ ( lowercase : Dict ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = x.shape[0]
_UpperCamelCase = x.reshape(in_channel // 4, 4 )
_UpperCamelCase = x[:, [0, 2, 1, 3]].transpose(0, 1 ).reshape(lowercase_ )
return x
def a__ ( lowercase : List[str], lowercase : List[Any], lowercase : str ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = {
'''upernet-swin-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth''',
'''upernet-swin-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth''',
'''upernet-swin-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth''',
'''upernet-swin-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth''',
}
_UpperCamelCase = model_name_to_url[model_name]
_UpperCamelCase = torch.hub.load_state_dict_from_url(lowercase_, map_location='''cpu''', file_name=lowercase_ )[
'''state_dict'''
]
for name, param in state_dict.items():
print(lowercase_, param.shape )
_UpperCamelCase = get_upernet_config(lowercase_ )
_UpperCamelCase = UperNetForSemanticSegmentation(lowercase_ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
_UpperCamelCase = state_dict.pop(lowercase_ )
if "bn" in key:
_UpperCamelCase = key.replace('''bn''', '''batch_norm''' )
_UpperCamelCase = val
# rename keys
_UpperCamelCase = create_rename_keys(lowercase_ )
for src, dest in rename_keys:
rename_key(lowercase_, lowercase_, lowercase_ )
read_in_q_k_v(lowercase_, config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
_UpperCamelCase = reverse_correct_unfold_reduction_order(lowercase_ )
if "norm" in key:
_UpperCamelCase = reverse_correct_unfold_norm_order(lowercase_ )
model.load_state_dict(lowercase_ )
# verify on image
_UpperCamelCase = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
_UpperCamelCase = Image.open(requests.get(lowercase_, stream=lowercase_ ).raw ).convert('''RGB''' )
_UpperCamelCase = SegformerImageProcessor()
_UpperCamelCase = processor(lowercase_, return_tensors='''pt''' ).pixel_values
with torch.no_grad():
_UpperCamelCase = model(lowercase_ )
_UpperCamelCase = outputs.logits
print(logits.shape )
print('''First values of logits:''', logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
_UpperCamelCase = torch.tensor(
[[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] )
elif model_name == "upernet-swin-small":
_UpperCamelCase = torch.tensor(
[[-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.0_9_0_8, -7.0_9_0_8, -6.8_5_3_4]] )
elif model_name == "upernet-swin-base":
_UpperCamelCase = torch.tensor(
[[-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.4_7_6_3, -6.4_7_6_3, -6.3_2_5_4]] )
elif model_name == "upernet-swin-large":
_UpperCamelCase = torch.tensor(
[[-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.4_0_4_4, -7.4_0_4_4, -7.2_5_8_6]] )
print('''Logits:''', outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3], lowercase_, atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
print(F"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(lowercase_ )
if push_to_hub:
print(F"""Pushing model and processor for {model_name} to hub""" )
model.push_to_hub(F"""openmmlab/{model_name}""" )
processor.push_to_hub(F"""openmmlab/{model_name}""" )
if __name__ == "__main__":
lowercase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[F"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
lowercase__ : Optional[Any] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 98 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A (__UpperCAmelCase ,unittest.TestCase ):
_SCREAMING_SNAKE_CASE = MgpstrTokenizer
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = False
def __a ( self ) -> List[Any]:
'''simple docstring'''
super().setUp()
# fmt: off
_snake_case : int = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
_snake_case : Optional[Any] = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
_snake_case : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowercase_ ) + '''\n''' )
def __a ( self , **lowercase_ ) -> Optional[Any]:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )
def __a ( self , lowercase_ ) -> Optional[int]:
'''simple docstring'''
_snake_case : Optional[Any] = '''tester'''
_snake_case : List[str] = '''tester'''
return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def __a ( self ) -> Any:
'''simple docstring'''
pass
def __a ( self ) -> Optional[int]:
'''simple docstring'''
_snake_case : List[str] = self.get_tokenizers(do_lower_case=lowercase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_snake_case : int = '''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
_snake_case : Union[str, Any] = tokenizer.encode([special_token] , add_special_tokens=lowercase_ )
self.assertEqual(len(lowercase_ ) , 1 )
_snake_case : Union[str, Any] = tokenizer.decode(lowercase_ , skip_special_tokens=lowercase_ )
self.assertTrue(special_token not in decoded )
def __a ( self ) -> List[Any]:
'''simple docstring'''
_snake_case : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_snake_case , _snake_case : int = self.get_input_output_texts(lowercase_ )
_snake_case : Optional[Any] = tokenizer.tokenize(lowercase_ )
_snake_case : Any = tokenizer.convert_tokens_to_ids(lowercase_ )
_snake_case : Tuple = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_snake_case : Tuple = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertNotEqual(len(lowercase_ ) , 0 )
_snake_case : str = tokenizer.decode(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertEqual(text_a.replace(''' ''' , '''''' ) , lowercase_ )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def __a ( self ) -> str:
'''simple docstring'''
pass
| 326 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ : Any = logging.get_logger(__name__)
UpperCamelCase_ : Dict = {
'''uw-madison/mra-base-512-4''': '''https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json''',
}
class lowerCamelCase__ ( snake_case__ ):
"""simple docstring"""
UpperCamelCase__ = '''mra'''
def __init__( self : Any ,a__ : Union[str, Any]=5_02_65 ,a__ : List[Any]=7_68 ,a__ : Tuple=12 ,a__ : Any=12 ,a__ : Any=30_72 ,a__ : Optional[int]="gelu" ,a__ : Optional[Any]=0.1 ,a__ : List[str]=0.1 ,a__ : int=5_12 ,a__ : str=1 ,a__ : str=0.02 ,a__ : Optional[int]=1e-5 ,a__ : str="absolute" ,a__ : List[Any]=4 ,a__ : int="full" ,a__ : Any=0 ,a__ : str=0 ,a__ : Any=1 ,a__ : List[Any]=0 ,a__ : Tuple=2 ,**a__ : Dict ,):
super().__init__(pad_token_id=_A ,bos_token_id=_A ,eos_token_id=_A ,**_A )
a__ = vocab_size
a__ = max_position_embeddings
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = initializer_range
a__ = type_vocab_size
a__ = layer_norm_eps
a__ = position_embedding_type
a__ = block_per_row
a__ = approx_mode
a__ = initial_prior_first_n_blocks
a__ = initial_prior_diagonal_n_blocks
| 711 |
'''simple docstring'''
from math import factorial
def _lowerCAmelCase (_lowercase , _lowercase , _lowercase ):
"""simple docstring"""
if successes > trials:
raise ValueError("successes must be lower or equal to trials" )
if trials < 0 or successes < 0:
raise ValueError("the function is defined for non-negative integers" )
if not isinstance(_lowercase , _lowercase ) or not isinstance(_lowercase , _lowercase ):
raise ValueError("the function is defined for non-negative integers" )
if not 0 < prob < 1:
raise ValueError("prob has to be in range of 1 - 0" )
a__ = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
a__ = float(factorial(_lowercase ) )
coefficient /= factorial(_lowercase ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("""Probability of 2 successes out of 4 trails""")
print("""with probability of 0.75 is:""", end=""" """)
print(binomial_distribution(2, 4, 0.7_5))
| 394 | 0 |
"""simple docstring"""
def __magic_name__ ( UpperCamelCase : int = 1000 ) -> int:
a__ = 1, 1
a__ = []
for i in range(1 , n + 1 ):
a__ = prev_numerator + 2 * prev_denominator
a__ = prev_numerator + prev_denominator
if len(str(UpperCamelCase__ ) ) > len(str(UpperCamelCase__ ) ):
result.append(UpperCamelCase__ )
a__ = numerator
a__ = denominator
return len(UpperCamelCase__ )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 273 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class A__ :
"""simple docstring"""
def __init__( self: Union[str, Any] )-> List[str]:
lowerCamelCase : Optional[int] = {}
def a__ ( self: Any , __a: int , __a: List[Any] , __a: Optional[int]=1 )-> Optional[Any]:
if self.graph.get(__a ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
lowerCamelCase : Tuple = [[w, v]]
if not self.graph.get(__a ):
lowerCamelCase : Optional[Any] = []
def a__ ( self: str )-> str:
return list(self.graph )
def a__ ( self: Any , __a: int , __a: Any )-> int:
if self.graph.get(__a ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__a )
def a__ ( self: Optional[int] , __a: str=-2 , __a: Optional[Any]=-1 )-> int:
if s == d:
return []
lowerCamelCase : Union[str, Any] = []
lowerCamelCase : str = []
if s == -2:
lowerCamelCase : List[Any] = list(self.graph )[0]
stack.append(__a )
visited.append(__a )
lowerCamelCase : int = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase : Optional[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__a )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase : Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__a ) != 0:
lowerCamelCase : int = stack[len(__a ) - 1]
else:
lowerCamelCase : Dict = ss
# check if se have reached the starting point
if len(__a ) == 0:
return visited
def a__ ( self: str , __a: str=-1 )-> Optional[Any]:
if c == -1:
lowerCamelCase : List[str] = floor(random() * 10_000 ) + 10
for i in range(__a ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowerCamelCase : Optional[int] = floor(random() * c ) + 1
if n != i:
self.add_pair(__a , __a , 1 )
def a__ ( self: Any , __a: int=-2 )-> List[str]:
lowerCamelCase : List[Any] = deque()
lowerCamelCase : List[str] = []
if s == -2:
lowerCamelCase : str = list(self.graph )[0]
d.append(__a )
visited.append(__a )
while d:
lowerCamelCase : Dict = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def a__ ( self: Tuple , __a: Union[str, Any] )-> Union[str, Any]:
lowerCamelCase : Optional[int] = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def a__ ( self: Optional[Any] , __a: Any )-> Optional[int]:
return len(self.graph[u] )
def a__ ( self: Optional[int] , __a: Tuple=-2 )-> List[Any]:
lowerCamelCase : Any = []
lowerCamelCase : Optional[Any] = []
if s == -2:
lowerCamelCase : List[Any] = list(self.graph )[0]
stack.append(__a )
visited.append(__a )
lowerCamelCase : Tuple = s
lowerCamelCase : List[Any] = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase : List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(__a ) != 0:
lowerCamelCase : Dict = stack[len(__a ) - 1]
else:
lowerCamelCase : Tuple = ss
# check if se have reached the starting point
if len(__a ) == 0:
return sorted_nodes
def a__ ( self: Dict )-> Tuple:
lowerCamelCase : Any = []
lowerCamelCase : Union[str, Any] = []
lowerCamelCase : List[str] = list(self.graph )[0]
stack.append(__a )
visited.append(__a )
lowerCamelCase : Union[str, Any] = -2
lowerCamelCase : Optional[int] = []
lowerCamelCase : Optional[int] = s
lowerCamelCase : str = False
lowerCamelCase : Optional[Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase : Any = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase : List[Any] = len(__a ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase : Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase : Optional[int] = True
if len(__a ) != 0:
lowerCamelCase : Tuple = stack[len(__a ) - 1]
else:
lowerCamelCase : List[str] = False
indirect_parents.append(__a )
lowerCamelCase : Any = s
lowerCamelCase : int = ss
# check if se have reached the starting point
if len(__a ) == 0:
return list(__a )
def a__ ( self: Any )-> int:
lowerCamelCase : str = []
lowerCamelCase : Any = []
lowerCamelCase : List[Any] = list(self.graph )[0]
stack.append(__a )
visited.append(__a )
lowerCamelCase : Any = -2
lowerCamelCase : Optional[int] = []
lowerCamelCase : Tuple = s
lowerCamelCase : Tuple = False
lowerCamelCase : Tuple = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase : str = len(__a ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase : List[Any] = True
if len(__a ) != 0:
lowerCamelCase : List[str] = stack[len(__a ) - 1]
else:
lowerCamelCase : str = False
indirect_parents.append(__a )
lowerCamelCase : Any = s
lowerCamelCase : List[str] = ss
# check if se have reached the starting point
if len(__a ) == 0:
return False
def a__ ( self: Optional[int] , __a: Tuple=-2 , __a: List[Any]=-1 )-> Optional[Any]:
lowerCamelCase : Union[str, Any] = time()
self.dfs(__a , __a )
lowerCamelCase : Tuple = time()
return end - begin
def a__ ( self: List[str] , __a: Optional[Any]=-2 )-> List[Any]:
lowerCamelCase : str = time()
self.bfs(__a )
lowerCamelCase : Tuple = time()
return end - begin
class A__ :
"""simple docstring"""
def __init__( self: Any )-> Tuple:
lowerCamelCase : List[Any] = {}
def a__ ( self: Tuple , __a: Any , __a: int , __a: List[Any]=1 )-> Union[str, Any]:
# check if the u exists
if self.graph.get(__a ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
lowerCamelCase : Any = [[w, v]]
# add the other way
if self.graph.get(__a ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
lowerCamelCase : Dict = [[w, u]]
def a__ ( self: Tuple , __a: List[str] , __a: List[str] )-> Any:
if self.graph.get(__a ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__a )
# the other way round
if self.graph.get(__a ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(__a )
def a__ ( self: Any , __a: str=-2 , __a: str=-1 )-> Tuple:
if s == d:
return []
lowerCamelCase : Dict = []
lowerCamelCase : List[Any] = []
if s == -2:
lowerCamelCase : Tuple = list(self.graph )[0]
stack.append(__a )
visited.append(__a )
lowerCamelCase : Any = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase : Any = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__a )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__a ) != 0:
lowerCamelCase : List[str] = stack[len(__a ) - 1]
else:
lowerCamelCase : Union[str, Any] = ss
# check if se have reached the starting point
if len(__a ) == 0:
return visited
def a__ ( self: Any , __a: Tuple=-1 )-> List[Any]:
if c == -1:
lowerCamelCase : Any = floor(random() * 10_000 ) + 10
for i in range(__a ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowerCamelCase : Union[str, Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(__a , __a , 1 )
def a__ ( self: Tuple , __a: int=-2 )-> str:
lowerCamelCase : Dict = deque()
lowerCamelCase : int = []
if s == -2:
lowerCamelCase : str = list(self.graph )[0]
d.append(__a )
visited.append(__a )
while d:
lowerCamelCase : Optional[Any] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def a__ ( self: str , __a: str )-> Any:
return len(self.graph[u] )
def a__ ( self: Any )-> List[str]:
lowerCamelCase : int = []
lowerCamelCase : Tuple = []
lowerCamelCase : str = list(self.graph )[0]
stack.append(__a )
visited.append(__a )
lowerCamelCase : List[str] = -2
lowerCamelCase : Optional[Any] = []
lowerCamelCase : Optional[Any] = s
lowerCamelCase : List[Any] = False
lowerCamelCase : List[str] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase : Any = len(__a ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase : List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase : Any = True
if len(__a ) != 0:
lowerCamelCase : Tuple = stack[len(__a ) - 1]
else:
lowerCamelCase : Dict = False
indirect_parents.append(__a )
lowerCamelCase : str = s
lowerCamelCase : Any = ss
# check if se have reached the starting point
if len(__a ) == 0:
return list(__a )
def a__ ( self: Any )-> Union[str, Any]:
lowerCamelCase : str = []
lowerCamelCase : str = []
lowerCamelCase : str = list(self.graph )[0]
stack.append(__a )
visited.append(__a )
lowerCamelCase : List[str] = -2
lowerCamelCase : List[str] = []
lowerCamelCase : Optional[int] = s
lowerCamelCase : Tuple = False
lowerCamelCase : List[Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase : Tuple = len(__a ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase : str = True
if len(__a ) != 0:
lowerCamelCase : Optional[int] = stack[len(__a ) - 1]
else:
lowerCamelCase : Tuple = False
indirect_parents.append(__a )
lowerCamelCase : List[str] = s
lowerCamelCase : List[Any] = ss
# check if se have reached the starting point
if len(__a ) == 0:
return False
def a__ ( self: Tuple )-> Optional[int]:
return list(self.graph )
def a__ ( self: Optional[Any] , __a: Dict=-2 , __a: Optional[Any]=-1 )-> Optional[int]:
lowerCamelCase : List[str] = time()
self.dfs(__a , __a )
lowerCamelCase : Optional[int] = time()
return end - begin
def a__ ( self: Union[str, Any] , __a: Optional[Any]=-2 )-> Any:
lowerCamelCase : Tuple = time()
self.bfs(__a )
lowerCamelCase : Optional[int] = time()
return end - begin
| 222 | 0 |
'''simple docstring'''
import math
import qiskit
def A_ ( _lowerCAmelCase : Tuple = 1 , _lowerCAmelCase : str = 1 , _lowerCAmelCase : List[Any] = 1 ):
"""simple docstring"""
if (
isinstance(_lowerCAmelCase , _lowerCAmelCase )
or isinstance(_lowerCAmelCase , _lowerCAmelCase )
or isinstance(_lowerCAmelCase , _lowerCAmelCase )
):
raise TypeError("inputs must be integers." )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError("inputs must be positive." )
if (
(math.floor(_lowerCAmelCase ) != input_a)
or (math.floor(_lowerCAmelCase ) != input_a)
or (math.floor(_lowerCAmelCase ) != carry_in)
):
raise ValueError("inputs must be exact integers." )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError("inputs must be less or equal to 2." )
# build registers
_lowerCamelCase : Any = qiskit.QuantumRegister(4 , "qr" )
_lowerCamelCase : List[str] = qiskit.ClassicalRegister(2 , "cr" )
# list the entries
_lowerCamelCase : Optional[int] = [input_a, input_a, carry_in]
_lowerCamelCase : Union[str, Any] = qiskit.QuantumCircuit(_lowerCAmelCase , _lowerCAmelCase )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(_lowerCAmelCase ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(_lowerCAmelCase ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(_lowerCAmelCase ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , _lowerCAmelCase ) # measure the last two qbits
_lowerCamelCase : int = qiskit.Aer.get_backend("aer_simulator" )
_lowerCamelCase : Optional[Any] = qiskit.execute(_lowerCAmelCase , _lowerCAmelCase , shots=1000 )
return job.result().get_counts(_lowerCAmelCase )
if __name__ == "__main__":
print(f'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''') | 708 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if "img_encoder.pos_embed" in name:
_lowerCamelCase : List[str] = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
_lowerCamelCase : Optional[int] = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
_lowerCamelCase : Optional[Any] = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
_lowerCamelCase : List[Any] = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
_lowerCamelCase : str = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
_lowerCamelCase : Tuple = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
_lowerCamelCase : Union[str, Any] = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
_lowerCamelCase : Optional[Any] = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
_lowerCamelCase : Optional[int] = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
_lowerCamelCase : List[str] = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
_lowerCamelCase : Any = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
_lowerCamelCase : str = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
_lowerCamelCase : Optional[int] = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
_lowerCamelCase : List[Any] = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
_lowerCamelCase : Tuple = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
_lowerCamelCase : Tuple = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
_lowerCamelCase : Optional[Any] = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
_lowerCamelCase : int = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
_lowerCamelCase : Tuple = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
_lowerCamelCase : str = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : str ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_lowerCamelCase : Optional[int] = orig_state_dict.pop(_lowerCAmelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Any = key.split("." )
_lowerCamelCase , _lowerCamelCase : List[str] = int(key_split[2] ), int(key_split[4] )
_lowerCamelCase : List[Any] = config.vision_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : int = val[dim : dim * 2, :]
_lowerCamelCase : str = val[-dim:, :]
else:
_lowerCamelCase : Optional[int] = val[:dim]
_lowerCamelCase : str = val[dim : dim * 2]
_lowerCamelCase : Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Optional[Any] = key.split("." )
_lowerCamelCase : int = int(key_split[3] )
_lowerCamelCase : Tuple = config.text_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : str = val[
dim : dim * 2, :
]
_lowerCamelCase : Optional[int] = val[-dim:, :]
else:
_lowerCamelCase : str = val[:dim]
_lowerCamelCase : Optional[Any] = val[dim : dim * 2]
_lowerCamelCase : Optional[int] = val[-dim:]
else:
_lowerCamelCase : int = rename_key(_lowerCAmelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
_lowerCamelCase : Any = val.squeeze_()
else:
_lowerCamelCase : Union[str, Any] = val
return orig_state_dict
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Any = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str="groupvit-gcc-yfcc" , _lowerCAmelCase : List[str]=False ):
"""simple docstring"""
_lowerCamelCase : List[str] = GroupViTConfig()
_lowerCamelCase : Union[str, Any] = GroupViTModel(_lowerCAmelCase ).eval()
_lowerCamelCase : Any = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
_lowerCamelCase : Union[str, Any] = convert_state_dict(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : Tuple = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(_lowerCAmelCase ) == 0)
# verify result
_lowerCamelCase : List[Any] = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Any = processor(text=["a photo of a cat", "a photo of a dog"] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
with torch.no_grad():
_lowerCamelCase : List[str] = model(**_lowerCAmelCase )
if model_name == "groupvit-gcc-yfcc":
_lowerCamelCase : List[Any] = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
_lowerCamelCase : Optional[Any] = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , _lowerCAmelCase , atol=1E-3 )
processor.save_pretrained(_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
print("Successfully saved processor and model to" , _lowerCAmelCase )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(_lowerCAmelCase , organization="nielsr" )
model.push_to_hub(_lowerCAmelCase , organization="nielsr" )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
default='groupvit-gccy-fcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 11 | 0 |
"""simple docstring"""
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_) -> str:
return F'gaussian_noise_s={seed}_shape={"_".join([str(lowerCamelCase_) for s in shape])}.npy'
def UpperCAmelCase__ ( self) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def UpperCAmelCase__ ( self , lowerCamelCase_=0 , lowerCamelCase_=(4, 4, 6_4, 6_4) , lowerCamelCase_=False) -> Union[str, Any]:
UpperCamelCase = jnp.bfloataa if fpaa else jnp.floataa
UpperCamelCase = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase_ , lowerCamelCase_)) , dtype=lowerCamelCase_)
return image
def UpperCAmelCase__ ( self , lowerCamelCase_=False , lowerCamelCase_="CompVis/stable-diffusion-v1-4") -> Tuple:
UpperCamelCase = jnp.bfloataa if fpaa else jnp.floataa
UpperCamelCase = '''bf16''' if fpaa else None
UpperCamelCase , UpperCamelCase = FlaxUNetaDConditionModel.from_pretrained(
lowerCamelCase_ , subfolder='''unet''' , dtype=lowerCamelCase_ , revision=lowerCamelCase_)
return model, params
def UpperCAmelCase__ ( self , lowerCamelCase_=0 , lowerCamelCase_=(4, 7_7, 7_6_8) , lowerCamelCase_=False) -> Dict:
UpperCamelCase = jnp.bfloataa if fpaa else jnp.floataa
UpperCamelCase = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase_ , lowerCamelCase_)) , dtype=lowerCamelCase_)
return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[1_7, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_0_0_0, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
])
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase , UpperCamelCase = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=lowerCamelCase_)
UpperCamelCase = self.get_latents(lowerCamelCase_ , fpaa=lowerCamelCase_)
UpperCamelCase = self.get_encoder_hidden_states(lowerCamelCase_ , fpaa=lowerCamelCase_)
UpperCamelCase = model.apply(
{'''params''': params} , lowerCamelCase_ , jnp.array(lowerCamelCase_ , dtype=jnp.intaa) , encoder_hidden_states=lowerCamelCase_ , ).sample
assert sample.shape == latents.shape
UpperCamelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())) , dtype=jnp.floataa)
UpperCamelCase = jnp.array(lowerCamelCase_ , dtype=jnp.floataa)
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-2)
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[1_7, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_0_0_0, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
])
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Optional[int]:
UpperCamelCase , UpperCamelCase = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=lowerCamelCase_)
UpperCamelCase = self.get_latents(lowerCamelCase_ , shape=(4, 4, 9_6, 9_6) , fpaa=lowerCamelCase_)
UpperCamelCase = self.get_encoder_hidden_states(lowerCamelCase_ , shape=(4, 7_7, 1_0_2_4) , fpaa=lowerCamelCase_)
UpperCamelCase = model.apply(
{'''params''': params} , lowerCamelCase_ , jnp.array(lowerCamelCase_ , dtype=jnp.intaa) , encoder_hidden_states=lowerCamelCase_ , ).sample
assert sample.shape == latents.shape
UpperCamelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())) , dtype=jnp.floataa)
UpperCamelCase = jnp.array(lowerCamelCase_ , dtype=jnp.floataa)
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-2) | 34 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class _snake_case :
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=None , ):
a :int = parent
a :List[Any] = batch_size
a :Optional[int] = seq_length
a :List[Any] = is_training
a :Union[str, Any] = use_input_mask
a :Tuple = use_token_type_ids
a :Tuple = use_labels
a :int = vocab_size
a :int = hidden_size
a :Optional[Any] = num_hidden_layers
a :List[str] = num_attention_heads
a :str = intermediate_size
a :Union[str, Any] = hidden_act
a :Tuple = hidden_dropout_prob
a :Dict = attention_probs_dropout_prob
a :Tuple = max_position_embeddings
a :int = type_vocab_size
a :Union[str, Any] = type_sequence_label_size
a :List[Any] = initializer_range
a :Any = num_labels
a :int = num_choices
a :Optional[Any] = scope
def SCREAMING_SNAKE_CASE__ ( self ):
a :str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a :Dict = None
if self.use_input_mask:
a :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
a :Dict = None
if self.use_token_type_ids:
a :Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a :Tuple = None
a :List[Any] = None
a :Any = None
if self.use_labels:
a :Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a :str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a :Any = ids_tensor([self.batch_size] , self.num_choices )
a :List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self ):
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
a :List[Any] = NystromformerModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
a :List[Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase )
a :Union[str, Any] = model(_lowerCamelCase , token_type_ids=_lowerCamelCase )
a :Any = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
a :Optional[int] = NystromformerForMaskedLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
a :Tuple = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
a :Dict = NystromformerForQuestionAnswering(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
a :Optional[Any] = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , start_positions=_lowerCamelCase , end_positions=_lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
a :List[str] = self.num_labels
a :Any = NystromformerForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
a :Optional[int] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
a :List[str] = self.num_labels
a :str = NystromformerForTokenClassification(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
a :Union[str, Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
a :Optional[Any] = self.num_choices
a :Optional[Any] = NystromformerForMultipleChoice(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
a :Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a :List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a :int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a :List[Any] = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[Any] = self.prepare_config_and_inputs()
(
(
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) ,
) :Any = config_and_inputs
a :Tuple = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _snake_case ( _snake_case , _snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ = (
{
'feature-extraction': NystromformerModel,
'fill-mask': NystromformerForMaskedLM,
'question-answering': NystromformerForQuestionAnswering,
'text-classification': NystromformerForSequenceClassification,
'token-classification': NystromformerForTokenClassification,
'zero-shot': NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
def SCREAMING_SNAKE_CASE__ ( self ):
a :Any = NystromformerModelTester(self )
a :Tuple = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ):
a :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a :Union[str, Any] = type
self.model_tester.create_and_check_model(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a :str = NystromformerModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
@require_torch
class _snake_case ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
a :Dict = NystromformerModel.from_pretrained('''uw-madison/nystromformer-512''' )
a :Any = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
a :Optional[int] = model(_lowerCamelCase )[0]
a :Optional[Any] = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , _lowerCamelCase )
a :Dict = torch.tensor(
[[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCamelCase , atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[int] = '''the [MASK] of Belgium is Brussels'''
a :Dict = AutoTokenizer.from_pretrained('''uw-madison/nystromformer-512''' )
a :str = NystromformerForMaskedLM.from_pretrained('''uw-madison/nystromformer-512''' )
a :Dict = tokenizer(_lowerCamelCase , return_tensors='''pt''' )
with torch.no_grad():
a :Union[str, Any] = model(encoding.input_ids ).logits
a :Optional[Any] = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(_lowerCamelCase ) , '''capital''' )
| 445 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE__ = {
'''vocab_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
},
'''merges_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
},
}
SCREAMING_SNAKE_CASE__ = {
'''gpt2''': 1024,
'''gpt2-medium''': 1024,
'''gpt2-large''': 1024,
'''gpt2-xl''': 1024,
'''distilgpt2''': 1024,
}
class _UpperCamelCase( lowercase__ ):
__SCREAMING_SNAKE_CASE : List[str] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : List[Any] = ['input_ids', 'attention_mask']
__SCREAMING_SNAKE_CASE : List[Any] = GPTaTokenizer
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : int="<|endoftext|>" , SCREAMING_SNAKE_CASE__ : Optional[int]="<|endoftext|>" , SCREAMING_SNAKE_CASE__ : List[str]="<|endoftext|>" , SCREAMING_SNAKE_CASE__ : List[str]=False , **SCREAMING_SNAKE_CASE__ : Optional[int] , ):
'''simple docstring'''
super().__init__(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
__a : int = kwargs.pop('add_bos_token' , SCREAMING_SNAKE_CASE__ )
__a : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space:
__a : Optional[int] = getattr(SCREAMING_SNAKE_CASE__ , pre_tok_state.pop('type' ) )
__a : Tuple = add_prefix_space
__a : Any = pre_tok_class(**SCREAMING_SNAKE_CASE__ )
__a : List[Any] = add_prefix_space
def __lowerCAmelCase ( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
__a : List[str] = kwargs.get('is_split_into_words' , SCREAMING_SNAKE_CASE__ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Dict , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
__a : Union[str, Any] = kwargs.get('is_split_into_words' , SCREAMING_SNAKE_CASE__ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple = None ):
'''simple docstring'''
__a : Dict = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
__a : List[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) + [self.eos_token_id] )
if len(SCREAMING_SNAKE_CASE__ ) > self.model_max_length:
__a : int = input_ids[-self.model_max_length :]
return input_ids
| 701 |
from __future__ import annotations
def UpperCAmelCase__ ( lowerCamelCase_ : list[int] ):
if not nums:
return 0
__a : Any = nums[0]
__a : List[Any] = 0
for num in nums[1:]:
__a , __a : List[Any] = (
max_excluding + num,
max(lowerCamelCase_ , lowerCamelCase_ ),
)
return max(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 577 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase_ = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 74 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class A :
__snake_case = MBartConfig
__snake_case = {}
__snake_case = 'gelu'
def __init__( self, UpperCamelCase__, UpperCamelCase__=13, UpperCamelCase__=7, UpperCamelCase__=True, UpperCamelCase__=False, UpperCamelCase__=99, UpperCamelCase__=32, UpperCamelCase__=2, UpperCamelCase__=4, UpperCamelCase__=37, UpperCamelCase__=0.1, UpperCamelCase__=0.1, UpperCamelCase__=20, UpperCamelCase__=2, UpperCamelCase__=1, UpperCamelCase__=0, ):
"""simple docstring"""
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = seq_length
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = eos_token_id
lowerCAmelCase_ = pad_token_id
lowerCAmelCase_ = bos_token_id
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
lowerCAmelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
lowerCAmelCase_ = tf.concat([input_ids, eos_tensor], axis=1 )
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCAmelCase_ = self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
lowerCAmelCase_ = prepare_mbart_inputs_dict(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = TFMBartModel(config=UpperCamelCase__ ).get_decoder()
lowerCAmelCase_ = inputs_dict['''input_ids''']
lowerCAmelCase_ = input_ids[:1, :]
lowerCAmelCase_ = inputs_dict['''attention_mask'''][:1, :]
lowerCAmelCase_ = inputs_dict['''head_mask''']
lowerCAmelCase_ = 1
# first forward pass
lowerCAmelCase_ = model(UpperCamelCase__, attention_mask=UpperCamelCase__, head_mask=UpperCamelCase__, use_cache=UpperCamelCase__ )
lowerCAmelCase_ , lowerCAmelCase_ = outputs.to_tuple()
lowerCAmelCase_ = past_key_values[1]
def __UpperCamelCase ( _A , _A , _A , _A=None , _A=None , _A=None , _A=None , _A=None , ):
if attention_mask is None:
lowerCAmelCase_ = tf.cast(tf.math.not_equal(_A , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
lowerCAmelCase_ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
lowerCAmelCase_ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCAmelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowerCAmelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class A ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__snake_case = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
__snake_case = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
__snake_case = (
{
'conversational': TFMBartForConditionalGeneration,
'feature-extraction': TFMBartModel,
'summarization': TFMBartForConditionalGeneration,
'text2text-generation': TFMBartForConditionalGeneration,
'translation': TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
__snake_case = True
__snake_case = False
__snake_case = False
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = TFMBartModelTester(self )
lowerCAmelCase_ = ConfigTester(self, config_class=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class A ( unittest.TestCase ):
__snake_case = [
' UN Chief Says There Is No Military Solution in Syria',
]
__snake_case = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
]
__snake_case = 'facebook/mbart-large-en-ro'
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def SCREAMING_SNAKE_CASE__ ( self, **UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = self.translate_src_text(**UpperCamelCase__ )
self.assertListEqual(self.expected_text, UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, **UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = self.tokenizer(self.src_text, **UpperCamelCase__, return_tensors='''tf''' )
lowerCAmelCase_ = self.model.generate(
model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2 )
lowerCAmelCase_ = self.tokenizer.batch_decode(UpperCamelCase__, skip_special_tokens=UpperCamelCase__ )
return generated_words
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
self._assert_generated_batch_equal_expected()
| 431 | 0 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class lowerCamelCase ( datasets.BeamBasedBuilder ):
'''simple docstring'''
def A__ ( self ):
return datasets.DatasetInfo(
features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=lowerCAmelCase , )
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(lowerCAmelCase )
class lowerCamelCase ( datasets.BeamBasedBuilder ):
'''simple docstring'''
def A__ ( self ):
return datasets.DatasetInfo(
features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=lowerCAmelCase , )
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
]
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(lowerCAmelCase )
def snake_case__ ( ) -> Tuple:
return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def snake_case__ ( ) -> Any:
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
@require_beam
def A__ ( self ):
UpperCAmelCase_ = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase_ = DummyBeamDataset(cache_dir=lowerCAmelCase , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(lowerCAmelCase , builder.name , "default" , "0.0.0" , f'''{builder.name}-train.arrow''' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
UpperCAmelCase_ = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , lowerCAmelCase )
self.assertEqual(dset["train"].info.splits["train"].num_examples , lowerCAmelCase )
self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(lowerCAmelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def A__ ( self ):
import apache_beam as beam
UpperCAmelCase_ = beam.io.parquetio.WriteToParquet
UpperCAmelCase_ = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase_ = DummyBeamDataset(cache_dir=lowerCAmelCase , beam_runner="DirectRunner" )
with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
UpperCAmelCase_ = partial(lowerCAmelCase , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
lowerCAmelCase , builder.name , "default" , "0.0.0" , f'''{builder.name}-train-00000-of-00002.arrow''' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
lowerCAmelCase , builder.name , "default" , "0.0.0" , f'''{builder.name}-train-00000-of-00002.arrow''' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
UpperCAmelCase_ = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , lowerCAmelCase )
self.assertEqual(dset["train"].info.splits["train"].num_examples , lowerCAmelCase )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
self.assertTrue(
os.path.exists(os.path.join(lowerCAmelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def A__ ( self ):
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase_ = DummyBeamDataset(cache_dir=lowerCAmelCase )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def A__ ( self ):
UpperCAmelCase_ = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase_ = NestedBeamDataset(cache_dir=lowerCAmelCase , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(lowerCAmelCase , builder.name , "default" , "0.0.0" , f'''{builder.name}-train.arrow''' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
UpperCAmelCase_ = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , lowerCAmelCase )
self.assertEqual(dset["train"].info.splits["train"].num_examples , lowerCAmelCase )
self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(lowerCAmelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
| 716 |
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> str:
UpperCAmelCase_ = int(__SCREAMING_SNAKE_CASE )
if decimal in (0, 1): # Exit cases for the recursion
return str(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ , UpperCAmelCase_ = divmod(__SCREAMING_SNAKE_CASE , 2 )
return binary_recursive(__SCREAMING_SNAKE_CASE ) + str(__SCREAMING_SNAKE_CASE )
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> str:
UpperCAmelCase_ = str(__SCREAMING_SNAKE_CASE ).strip()
if not number:
raise ValueError("No input value was provided" )
UpperCAmelCase_ = "-" if number.startswith("-" ) else ""
UpperCAmelCase_ = number.lstrip("-" )
if not number.isnumeric():
raise ValueError("Input value is not an integer" )
return f'''{negative}0b{binary_recursive(int(__SCREAMING_SNAKE_CASE ) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
| 23 | 0 |
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class __lowercase :
def __init__( self ) ->List[Any]:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = ''''''
__lowerCAmelCase : List[Any] = ''''''
__lowerCAmelCase : Any = []
__lowerCAmelCase : Optional[int] = 0
__lowerCAmelCase : Dict = 256
__lowerCAmelCase : Tuple = 0
__lowerCAmelCase : Union[str, Any] = 0
__lowerCAmelCase : Optional[Any] = 0
__lowerCAmelCase : Optional[int] = 0
def UpperCamelCase__ ( self , A_ ) ->int:
'''simple docstring'''
__lowerCAmelCase : Tuple = cva.imread(A_ , 0 )
__lowerCAmelCase : List[str] = copy.deepcopy(self.img )
__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase : Tuple = plt.hist(self.img.ravel() , 256 , [0, 256] , label='''x''' )
__lowerCAmelCase : List[Any] = np.sum(A_ )
for i in range(len(A_ ) ):
__lowerCAmelCase : List[Any] = x[i] / self.k
self.sk += prk
__lowerCAmelCase : Optional[int] = (self.L - 1) * self.sk
if self.rem != 0:
__lowerCAmelCase : int = int(last % last )
__lowerCAmelCase : List[Any] = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(A_ )
__lowerCAmelCase : int = int(np.ma.count(self.img ) / self.img[1].size )
__lowerCAmelCase : List[Any] = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
__lowerCAmelCase : int = self.img[j][i]
if num != self.last_list[num]:
__lowerCAmelCase : Tuple = self.last_list[num]
cva.imwrite('''output_data/output.jpg''' , self.img )
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
plt.hist(self.img.ravel() , 256 , [0, 256] )
def UpperCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
cva.imshow('''Output-Image''' , self.img )
cva.imshow('''Input-Image''' , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
_UpperCamelCase = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
_UpperCamelCase = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 492 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class __lowercase (_UpperCAmelCase ):
_UpperCamelCase = 42
_UpperCamelCase = 42
class __lowercase (nn.Module ):
_UpperCamelCase = 42
_UpperCamelCase = (16, 32, 96, 256)
_UpperCamelCase = jnp.floataa
def UpperCamelCase__ ( self ) ->str:
'''simple docstring'''
__lowerCAmelCase : int = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__lowerCAmelCase : List[str] = []
for i in range(len(self.block_out_channels ) - 1 ):
__lowerCAmelCase : List[Any] = self.block_out_channels[i]
__lowerCAmelCase : Dict = self.block_out_channels[i + 1]
__lowerCAmelCase : Any = nn.Conv(
A_ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(A_ )
__lowerCAmelCase : Any = nn.Conv(
A_ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(A_ )
__lowerCAmelCase : List[str] = blocks
__lowerCAmelCase : str = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , A_ ) ->Dict:
'''simple docstring'''
__lowerCAmelCase : Any = self.conv_in(A_ )
__lowerCAmelCase : int = nn.silu(A_ )
for block in self.blocks:
__lowerCAmelCase : int = block(A_ )
__lowerCAmelCase : Union[str, Any] = nn.silu(A_ )
__lowerCAmelCase : List[Any] = self.conv_out(A_ )
return embedding
@flax_register_to_config
class __lowercase (nn.Module , _UpperCAmelCase , _UpperCAmelCase ):
_UpperCamelCase = 32
_UpperCamelCase = 4
_UpperCamelCase = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_UpperCamelCase = False
_UpperCamelCase = (320, 640, 1280, 1280)
_UpperCamelCase = 2
_UpperCamelCase = 8
_UpperCamelCase = None
_UpperCamelCase = 1280
_UpperCamelCase = 0.0
_UpperCamelCase = False
_UpperCamelCase = jnp.floataa
_UpperCamelCase = True
_UpperCamelCase = 0
_UpperCamelCase = "rgb"
_UpperCamelCase = (16, 32, 96, 256)
def UpperCamelCase__ ( self , A_ ) ->FrozenDict:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = (1, self.in_channels, self.sample_size, self.sample_size)
__lowerCAmelCase : str = jnp.zeros(A_ , dtype=jnp.floataa )
__lowerCAmelCase : Tuple = jnp.ones((1,) , dtype=jnp.intaa )
__lowerCAmelCase : Optional[Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
__lowerCAmelCase : Any = (1, 3, self.sample_size * 8, self.sample_size * 8)
__lowerCAmelCase : Optional[Any] = jnp.zeros(A_ , dtype=jnp.floataa )
__lowerCAmelCase, __lowerCAmelCase : Any = jax.random.split(A_ )
__lowerCAmelCase : int = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(A_ , A_ , A_ , A_ , A_ )["params"]
def UpperCamelCase__ ( self ) ->Dict:
'''simple docstring'''
__lowerCAmelCase : List[Any] = self.block_out_channels
__lowerCAmelCase : Optional[Any] = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__lowerCAmelCase : Optional[int] = self.num_attention_heads or self.attention_head_dim
# input
__lowerCAmelCase : List[Any] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__lowerCAmelCase : List[str] = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
__lowerCAmelCase : str = FlaxTimestepEmbedding(A_ , dtype=self.dtype )
__lowerCAmelCase : Optional[int] = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
__lowerCAmelCase : Union[str, Any] = self.only_cross_attention
if isinstance(A_ , A_ ):
__lowerCAmelCase : Union[str, Any] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(A_ , A_ ):
__lowerCAmelCase : str = (num_attention_heads,) * len(self.down_block_types )
# down
__lowerCAmelCase : Optional[int] = []
__lowerCAmelCase : Union[str, Any] = []
__lowerCAmelCase : Optional[Any] = block_out_channels[0]
__lowerCAmelCase : List[str] = nn.Conv(
A_ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(A_ )
for i, down_block_type in enumerate(self.down_block_types ):
__lowerCAmelCase : List[Any] = output_channel
__lowerCAmelCase : Optional[int] = block_out_channels[i]
__lowerCAmelCase : str = i == len(A_ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__lowerCAmelCase : List[str] = FlaxCrossAttnDownBlockaD(
in_channels=A_ , out_channels=A_ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
__lowerCAmelCase : List[Any] = FlaxDownBlockaD(
in_channels=A_ , out_channels=A_ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(A_ )
for _ in range(self.layers_per_block ):
__lowerCAmelCase : str = nn.Conv(
A_ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(A_ )
if not is_final_block:
__lowerCAmelCase : Any = nn.Conv(
A_ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(A_ )
__lowerCAmelCase : int = down_blocks
__lowerCAmelCase : Optional[int] = controlnet_down_blocks
# mid
__lowerCAmelCase : List[str] = block_out_channels[-1]
__lowerCAmelCase : Any = FlaxUNetMidBlockaDCrossAttn(
in_channels=A_ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
__lowerCAmelCase : Any = nn.Conv(
A_ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , A_ , A_ , A_ , A_ , A_ = 1.0 , A_ = True , A_ = False , ) ->Union[FlaxControlNetOutput, Tuple]:
'''simple docstring'''
__lowerCAmelCase : int = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
__lowerCAmelCase : List[Any] = jnp.flip(A_ , axis=1 )
# 1. time
if not isinstance(A_ , jnp.ndarray ):
__lowerCAmelCase : List[str] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(A_ , jnp.ndarray ) and len(timesteps.shape ) == 0:
__lowerCAmelCase : Optional[int] = timesteps.astype(dtype=jnp.floataa )
__lowerCAmelCase : Optional[Any] = jnp.expand_dims(A_ , 0 )
__lowerCAmelCase : Optional[int] = self.time_proj(A_ )
__lowerCAmelCase : str = self.time_embedding(A_ )
# 2. pre-process
__lowerCAmelCase : List[str] = jnp.transpose(A_ , (0, 2, 3, 1) )
__lowerCAmelCase : Union[str, Any] = self.conv_in(A_ )
__lowerCAmelCase : Tuple = jnp.transpose(A_ , (0, 2, 3, 1) )
__lowerCAmelCase : int = self.controlnet_cond_embedding(A_ )
sample += controlnet_cond
# 3. down
__lowerCAmelCase : Tuple = (sample,)
for down_block in self.down_blocks:
if isinstance(A_ , A_ ):
__lowerCAmelCase, __lowerCAmelCase : Dict = down_block(A_ , A_ , A_ , deterministic=not train )
else:
__lowerCAmelCase, __lowerCAmelCase : Optional[Any] = down_block(A_ , A_ , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
__lowerCAmelCase : Union[str, Any] = self.mid_block(A_ , A_ , A_ , deterministic=not train )
# 5. contronet blocks
__lowerCAmelCase : Union[str, Any] = ()
for down_block_res_sample, controlnet_block in zip(A_ , self.controlnet_down_blocks ):
__lowerCAmelCase : List[Any] = controlnet_block(A_ )
controlnet_down_block_res_samples += (down_block_res_sample,)
__lowerCAmelCase : Dict = controlnet_down_block_res_samples
__lowerCAmelCase : int = self.controlnet_mid_block(A_ )
# 6. scaling
__lowerCAmelCase : Tuple = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=A_ , mid_block_res_sample=A_ )
| 492 | 1 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class a__ ( enum.Enum ):
A__ : Dict = 0
A__ : str = 1
A__ : Optional[Any] = 2
@add_end_docstrings(__snake_case )
class a__ ( __snake_case ):
A__ : Optional[int] = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> Tuple:
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
__a = None
if self.model.config.prefix is not None:
__a = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
__a = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
__a , __a , __a = self._sanitize_parameters(prefix=UpperCAmelCase , **self._forward_params )
__a = {**self._preprocess_params, **preprocess_params}
__a = {**self._forward_params, **forward_params}
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase , ) -> List[Any]:
__a = {}
if prefix is not None:
__a = prefix
if prefix:
__a = self.tokenizer(
UpperCAmelCase , padding=UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_tensors=self.framework )
__a = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
' [None, \'hole\']' )
__a = handle_long_generation
preprocess_params.update(UpperCAmelCase )
__a = generate_kwargs
__a = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
__a = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
__a = ReturnType.TENSORS
if return_type is not None:
__a = return_type
if clean_up_tokenization_spaces is not None:
__a = clean_up_tokenization_spaces
if stop_sequence is not None:
__a = self.tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
if len(UpperCAmelCase ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
__a = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __SCREAMING_SNAKE_CASE ( self , *UpperCAmelCase , **UpperCAmelCase ) -> List[Any]:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*UpperCAmelCase , **UpperCAmelCase )
def __call__( self , UpperCAmelCase , **UpperCAmelCase ) -> Dict:
return super().__call__(UpperCAmelCase , **UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase="" , UpperCAmelCase=None , **UpperCAmelCase ) -> Tuple:
__a = self.tokenizer(
prefix + prompt_text , padding=UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_tensors=self.framework )
__a = prompt_text
if handle_long_generation == "hole":
__a = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
__a = generate_kwargs['max_new_tokens']
else:
__a = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
__a = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
' models max length' )
__a = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
__a = inputs['attention_mask'][:, -keep_length:]
return inputs
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , **UpperCAmelCase ) -> int:
__a = model_inputs['input_ids']
__a = model_inputs.get('attention_mask' , UpperCAmelCase )
# Allow empty prompts
if input_ids.shape[1] == 0:
__a = None
__a = None
__a = 1
else:
__a = input_ids.shape[0]
__a = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
__a = generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
__a = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
__a = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
__a = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
__a = self.model.generate(input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , **UpperCAmelCase )
__a = generated_sequence.shape[0]
if self.framework == "pt":
__a = generated_sequence.reshape(UpperCAmelCase , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
__a = tf.reshape(UpperCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase=ReturnType.FULL_TEXT , UpperCAmelCase=True ) -> str:
__a = model_outputs['generated_sequence'][0]
__a = model_outputs['input_ids']
__a = model_outputs['prompt_text']
__a = generated_sequence.numpy().tolist()
__a = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
__a = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
__a = self.tokenizer.decode(
UpperCAmelCase , skip_special_tokens=UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
__a = 0
else:
__a = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase , ) )
if return_type == ReturnType.FULL_TEXT:
__a = prompt_text + text[prompt_length:]
else:
__a = text[prompt_length:]
__a = {'generated_text': all_text}
records.append(UpperCAmelCase )
return records
| 717 | import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class a__ ( __snake_case , unittest.TestCase ):
A__ : List[str] = TextToVideoSDPipeline
A__ : Dict = TEXT_TO_IMAGE_PARAMS
A__ : List[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
A__ : int = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
torch.manual_seed(0 )
__a = UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=3_2 , attention_head_dim=4 , )
__a = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase , set_alpha_to_one=UpperCAmelCase , )
torch.manual_seed(0 )
__a = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
__a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , )
__a = CLIPTextModel(UpperCAmelCase )
__a = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__a = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase=0 ) -> Optional[int]:
if str(UpperCAmelCase ).startswith('mps' ):
__a = torch.manual_seed(UpperCAmelCase )
else:
__a = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
__a = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def __SCREAMING_SNAKE_CASE ( self ) -> int:
__a = 'cpu' # ensure determinism for the device-dependent torch.Generator
__a = self.get_dummy_components()
__a = TextToVideoSDPipeline(**UpperCAmelCase )
__a = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
__a = self.get_dummy_inputs(UpperCAmelCase )
__a = 'np'
__a = sd_pipe(**UpperCAmelCase ).frames
__a = frames[0][-3:, -3:, -1]
assert frames[0].shape == (6_4, 6_4, 3)
__a = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=UpperCAmelCase , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCAmelCase , expected_max_diff=1e-2 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return super().test_progress_bar()
@slow
@skip_mps
class a__ ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
__a = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' )
__a = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
__a = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__a = pipe.to('cuda' )
__a = 'Spiderman is surfing'
__a = torch.Generator(device='cpu' ).manual_seed(0 )
__a = pipe(UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=2_5 , output_type='pt' ).frames
__a = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
__a = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' )
__a = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
__a = pipe.to('cuda' )
__a = 'Spiderman is surfing'
__a = torch.Generator(device='cpu' ).manual_seed(0 )
__a = pipe(UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=2 , output_type='pt' ).frames
__a = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 246 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
UpperCamelCase_ = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def lowercase__( __UpperCamelCase: Tuple ,__UpperCamelCase: Optional[int] ,__UpperCamelCase: Union[str, Any] ,__UpperCamelCase: Dict=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = XLNetConfig.from_json_file(__lowerCAmelCase )
SCREAMING_SNAKE_CASE : Tuple = finetuning_task.lower() if finetuning_task is not None else ''''''
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}" )
SCREAMING_SNAKE_CASE : Optional[int] = finetuning_task
SCREAMING_SNAKE_CASE : Optional[int] = GLUE_TASKS_NUM_LABELS[finetuning_task]
SCREAMING_SNAKE_CASE : int = XLNetForSequenceClassification(__lowerCAmelCase )
elif "squad" in finetuning_task:
SCREAMING_SNAKE_CASE : Union[str, Any] = finetuning_task
SCREAMING_SNAKE_CASE : Optional[int] = XLNetForQuestionAnswering(__lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE : List[Any] = XLNetLMHeadModel(__lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
# Save pytorch-model
SCREAMING_SNAKE_CASE : str = os.path.join(__lowerCAmelCase ,__lowerCAmelCase )
SCREAMING_SNAKE_CASE : Tuple = os.path.join(__lowerCAmelCase ,__lowerCAmelCase )
print(f"Save PyTorch model to {os.path.abspath(__lowerCAmelCase )}" )
torch.save(model.state_dict() ,__lowerCAmelCase )
print(f"Save configuration file to {os.path.abspath(__lowerCAmelCase )}" )
with open(__lowerCAmelCase ,'w' ,encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
UpperCamelCase_ = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 28 |
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase = " " ) -> list:
"""simple docstring"""
snake_case__ : str = []
snake_case__ : int = 0
for index, char in enumerate(__lowerCAmelCase ):
if char == separator:
split_words.append(string[last_index:index] )
snake_case__ : Dict = index + 1
elif index + 1 == len(__lowerCAmelCase ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 252 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 706 | lowercase_ = {str(digit): digit**5 for digit in range(10)}
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(__SCREAMING_SNAKE_CASE ) )
def __lowerCAmelCase ( ):
'''simple docstring'''
return sum(
number
for number in range(1_0_0_0 , 1_0_0_0_0_0_0 )
if number == digits_fifth_powers_sum(__SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
print(solution())
| 390 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
a = logging.get_logger(__name__)
class __a ( _snake_case ):
def __init__( self : str ,*lowerCamelCase : List[str] ,**lowerCamelCase : Dict ):
'''simple docstring'''
warnings.warn(
"""The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DPTImageProcessor instead.""" ,lowerCamelCase ,)
super().__init__(*lowerCamelCase ,**lowerCamelCase )
| 109 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase = {
'''configuration_swiftformer''': [
'''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SwiftFormerConfig''',
'''SwiftFormerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwiftFormerForImageClassification''',
'''SwiftFormerModel''',
'''SwiftFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 157 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ : Union[str, Any] = {
"configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
"processing_mgp_str": ["MgpstrProcessor"],
"tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : List[str] = [
"MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
"MgpstrModel",
"MgpstrPreTrainedModel",
"MgpstrForSceneTextRecognition",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 721 | """simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class snake_case ( ctypes.Structure ):
"""simple docstring"""
snake_case__ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def a_ ( ):
if os.name == "nt":
UpperCAmelCase__ = CursorInfo()
UpperCAmelCase__ = ctypes.windll.kernelaa.GetStdHandle(-1_1 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(lowerCamelCase , ctypes.byref(lowerCamelCase ) )
UpperCAmelCase__ = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(lowerCamelCase , ctypes.byref(lowerCamelCase ) )
elif os.name == "posix":
sys.stdout.write('\033[?25l' )
sys.stdout.flush()
def a_ ( ):
if os.name == "nt":
UpperCAmelCase__ = CursorInfo()
UpperCAmelCase__ = ctypes.windll.kernelaa.GetStdHandle(-1_1 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(lowerCamelCase , ctypes.byref(lowerCamelCase ) )
UpperCAmelCase__ = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(lowerCamelCase , ctypes.byref(lowerCamelCase ) )
elif os.name == "posix":
sys.stdout.write('\033[?25h' )
sys.stdout.flush()
@contextmanager
def a_ ( ):
try:
hide_cursor()
yield
finally:
show_cursor()
| 632 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__magic_name__: Optional[int] = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def UpperCamelCase ( _A, _A=None, _A=None, _A=None ):
"""simple docstring"""
__magic_name__ : List[Any] = True
while ask_again:
__magic_name__ : Any = input(_A )
try:
if default is not None and len(_A ) == 0:
return default
return convert_value(_A ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(_A )
def UpperCamelCase ( _A, _A=[], _A=None, _A=0 ):
"""simple docstring"""
__magic_name__ : List[Any] = BulletMenu(_A, _A )
__magic_name__ : Any = menu.run(default_choice=_A )
return convert_value(_A ) if convert_value is not None else result
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : Optional[int] = int(_A )
return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] )
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : List[Any] = int(_A )
return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] )
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : Optional[int] = int(_A )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : int = int(_A )
return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] )
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : str = int(_A )
return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] )
def UpperCamelCase ( _A ):
"""simple docstring"""
return {"yes": True, "no": False}[value.lower()]
class snake_case__ ( argparse.RawDescriptionHelpFormatter ):
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
__magic_name__ : Any = super()._format_usage(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : Dict = usage.replace("""<command> [<args>] """ , """""" )
return usage
| 324 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class snake_case__ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
lowercase__ : str = StableDiffusionPanoramaPipeline
lowercase__ : str = TEXT_TO_IMAGE_PARAMS
lowercase__ : List[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase__ : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
lowercase__ : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
def __magic_name__ ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
__magic_name__ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
__magic_name__ : Union[str, Any] = DDIMScheduler()
torch.manual_seed(0 )
__magic_name__ : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__magic_name__ : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__magic_name__ : int = CLIPTextModel(lowerCAmelCase__ )
__magic_name__ : Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__magic_name__ : Any = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=0 ) -> str:
__magic_name__ : Any = torch.manual_seed(lowerCAmelCase__ )
__magic_name__ : Tuple = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
# Setting height and width to None to prevent OOMs on CPU.
"""height""": None,
"""width""": None,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def __magic_name__ ( self ) -> List[str]:
__magic_name__ : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__ : Union[str, Any] = self.get_dummy_components()
__magic_name__ : List[str] = StableDiffusionPanoramaPipeline(**lowerCAmelCase__ )
__magic_name__ : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__magic_name__ : Dict = self.get_dummy_inputs(lowerCAmelCase__ )
__magic_name__ : Tuple = sd_pipe(**lowerCAmelCase__ ).images
__magic_name__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__magic_name__ : Any = np.array([0.6_1_8_6, 0.5_3_7_4, 0.4_9_1_5, 0.4_1_3_5, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_7, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__ ( self ) -> List[Any]:
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def __magic_name__ ( self ) -> str:
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 )
def __magic_name__ ( self ) -> str:
__magic_name__ : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__ : Tuple = self.get_dummy_components()
__magic_name__ : Dict = StableDiffusionPanoramaPipeline(**lowerCAmelCase__ )
__magic_name__ : int = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = """french fries"""
__magic_name__ : int = sd_pipe(**lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ )
__magic_name__ : Dict = output.images
__magic_name__ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__magic_name__ : Dict = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__ ( self ) -> str:
__magic_name__ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__ : Any = self.get_dummy_components()
__magic_name__ : Tuple = StableDiffusionPanoramaPipeline(**lowerCAmelCase__ )
__magic_name__ : str = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
__magic_name__ : List[Any] = sd_pipe(**lowerCAmelCase__ , view_batch_size=2 )
__magic_name__ : List[Any] = output.images
__magic_name__ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__magic_name__ : List[Any] = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__ : str = self.get_dummy_components()
__magic_name__ : int = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" )
__magic_name__ : str = StableDiffusionPanoramaPipeline(**lowerCAmelCase__ )
__magic_name__ : str = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__magic_name__ : Dict = self.get_dummy_inputs(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = sd_pipe(**lowerCAmelCase__ ).images
__magic_name__ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__magic_name__ : int = np.array([0.4_0_2_4, 0.6_5_1_0, 0.4_9_0_1, 0.5_3_7_8, 0.5_8_1_3, 0.5_6_2_2, 0.4_7_9_5, 0.4_4_6_7, 0.4_9_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__ : Tuple = self.get_dummy_components()
__magic_name__ : int = PNDMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , skip_prk_steps=lowerCAmelCase__ )
__magic_name__ : List[Any] = StableDiffusionPanoramaPipeline(**lowerCAmelCase__ )
__magic_name__ : Dict = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__magic_name__ : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__magic_name__ : Dict = sd_pipe(**lowerCAmelCase__ ).images
__magic_name__ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__magic_name__ : str = np.array([0.6_3_9_1, 0.6_2_9_1, 0.4_8_6_1, 0.5_1_3_4, 0.5_5_5_2, 0.4_5_7_8, 0.5_0_3_2, 0.5_0_2_3, 0.4_5_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
def __magic_name__ ( self ) -> List[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self , lowerCAmelCase__=0 ) -> List[Any]:
__magic_name__ : Union[str, Any] = torch.manual_seed(lowerCAmelCase__ )
__magic_name__ : str = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : int = """stabilityai/stable-diffusion-2-base"""
__magic_name__ : Optional[int] = DDIMScheduler.from_pretrained(lowerCAmelCase__ , subfolder="""scheduler""" )
__magic_name__ : Optional[int] = StableDiffusionPanoramaPipeline.from_pretrained(lowerCAmelCase__ , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__magic_name__ : List[str] = self.get_inputs()
__magic_name__ : Tuple = pipe(**lowerCAmelCase__ ).images
__magic_name__ : Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
__magic_name__ : Tuple = np.array(
[
0.3_6_9_6_8_3_9_2,
0.2_7_0_2_5_3_7_2,
0.3_2_4_4_6_7_6_6,
0.2_8_3_7_9_3_8_7,
0.3_6_3_6_3_2_7_4,
0.3_0_7_3_3_3_4_7,
0.2_7_1_0_0_0_2_7,
0.2_7_0_5_4_1_2_5,
0.2_5_5_3_6_0_9_6,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : Dict = StableDiffusionPanoramaPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-base""" , safety_checker=lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__magic_name__ : List[str] = self.get_inputs()
__magic_name__ : int = pipe(**lowerCAmelCase__ ).images
__magic_name__ : Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
__magic_name__ : Any = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def __magic_name__ ( self ) -> str:
__magic_name__ : List[str] = 0
def callback_fn(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
__magic_name__ : List[Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__magic_name__ : int = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
__magic_name__ : int = latents[0, -3:, -3:, -1]
__magic_name__ : Optional[Any] = np.array(
[
0.1_8_6_8_1_8_6_9,
0.3_3_9_0_7_8_1_6,
0.5_3_6_1_2_7_6,
0.1_4_4_3_2_8_6_5,
-0.0_2_8_5_6_6_1_1,
-0.7_3_9_4_1_1_2_3,
0.2_3_3_9_7_9_8_7,
0.4_7_3_2_2_6_8_2,
-0.3_7_8_2_3_1_6_4,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
__magic_name__ : List[Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
__magic_name__ : List[str] = latents[0, -3:, -3:, -1]
__magic_name__ : Dict = np.array(
[
0.1_8_5_3_9_6_4_5,
0.3_3_9_8_7_2_4_8,
0.5_3_7_8_5_5_9,
0.1_4_4_3_7_1_4_2,
-0.0_2_4_5_5_2_6_1,
-0.7_3_3_8_3_1_7,
0.2_3_9_9_0_7_5_5,
0.4_7_3_5_6_2_7_2,
-0.3_7_8_6_5_0_5,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
__magic_name__ : List[Any] = False
__magic_name__ : Union[str, Any] = """stabilityai/stable-diffusion-2-base"""
__magic_name__ : int = DDIMScheduler.from_pretrained(lowerCAmelCase__ , subfolder="""scheduler""" )
__magic_name__ : List[str] = StableDiffusionPanoramaPipeline.from_pretrained(lowerCAmelCase__ , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ )
__magic_name__ : Optional[Any] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__magic_name__ : List[Any] = self.get_inputs()
pipe(**lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __magic_name__ ( self ) -> List[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__magic_name__ : List[Any] = """stabilityai/stable-diffusion-2-base"""
__magic_name__ : List[str] = DDIMScheduler.from_pretrained(lowerCAmelCase__ , subfolder="""scheduler""" )
__magic_name__ : List[str] = StableDiffusionPanoramaPipeline.from_pretrained(lowerCAmelCase__ , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ )
__magic_name__ : str = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__magic_name__ : List[Any] = self.get_inputs()
__magic_name__ : Union[str, Any] = pipe(**lowerCAmelCase__ )
__magic_name__ : List[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 324 | 1 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ ):
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
lowercase__ = TapasConfig.from_json_file(A__ )
# set absolute/relative position embeddings parameter
lowercase__ = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
lowercase__ = TapasForQuestionAnswering(config=A__ )
elif task == "WTQ":
# run_task_main.py hparams
lowercase__ = 4
lowercase__ = True
# hparam_utils.py hparams
lowercase__ = 0.66_46_94
lowercase__ = 0.20_79_51
lowercase__ = 0.12_11_94
lowercase__ = True
lowercase__ = True
lowercase__ = False
lowercase__ = 0.0_35_25_13
lowercase__ = TapasForQuestionAnswering(config=A__ )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
lowercase__ = 4
lowercase__ = False
# hparam_utils.py hparams
lowercase__ = 36.45_19
lowercase__ = 0.90_34_21
lowercase__ = 2_22.0_88
lowercase__ = True
lowercase__ = True
lowercase__ = True
lowercase__ = 0.76_31_41
lowercase__ = TapasForQuestionAnswering(config=A__ )
elif task == "TABFACT":
lowercase__ = TapasForSequenceClassification(config=A__ )
elif task == "MLM":
lowercase__ = TapasForMaskedLM(config=A__ )
elif task == "INTERMEDIATE_PRETRAINING":
lowercase__ = TapasModel(config=A__ )
else:
raise ValueError(F'''Task {task} not supported.''' )
print(F'''Building PyTorch model from configuration: {config}''' )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(A__ , A__ , A__ )
# Save pytorch-model (weights and configuration)
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(A__ )
# Save tokenizer files
print(F'''Save tokenizer files to {pytorch_dump_path}''' )
lowercase__ = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt' , model_max_length=512 )
tokenizer.save_pretrained(A__ )
print('Used relative position embeddings:' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
a__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
a__ : Dict = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 642 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
a__ : Union[str, Any] = logging.get_logger(__name__)
a__ : Optional[Any] = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Union[str, Any] = "umt5"
A : List[str] = ["past_key_values"]
def __init__( self : List[Any] , lowerCAmelCase : Optional[int]=25_01_12 , lowerCAmelCase : str=5_12 , lowerCAmelCase : List[Any]=64 , lowerCAmelCase : Optional[int]=10_24 , lowerCAmelCase : Union[str, Any]=8 , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=6 , lowerCAmelCase : int=32 , lowerCAmelCase : int=1_28 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : List[str]=1E-6 , lowerCAmelCase : Optional[int]=1.0 , lowerCAmelCase : Optional[Any]="gated-gelu" , lowerCAmelCase : List[Any]=True , lowerCAmelCase : List[str]=True , lowerCAmelCase : List[Any]="T5Tokenizer" , lowerCAmelCase : str=True , lowerCAmelCase : Optional[int]=0 , lowerCAmelCase : Tuple=1 , lowerCAmelCase : Any=0 , **lowerCAmelCase : int , ) -> str:
"""simple docstring"""
super().__init__(
is_encoder_decoder=lowerCAmelCase , tokenizer_class=lowerCAmelCase , tie_word_embeddings=lowerCAmelCase , pad_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = d_kv
lowercase__ = d_ff
lowercase__ = num_layers
lowercase__ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowercase__ = num_heads
lowercase__ = relative_attention_num_buckets
lowercase__ = relative_attention_max_distance
lowercase__ = dropout_rate
lowercase__ = layer_norm_epsilon
lowercase__ = initializer_factor
lowercase__ = feed_forward_proj
lowercase__ = use_cache
lowercase__ = self.feed_forward_proj.split('-')
lowercase__ = act_info[-1]
lowercase__ = act_info[0] == 'gated'
if len(lowerCAmelCase) > 1 and act_info[0] != "gated" or len(lowerCAmelCase) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'')
if feed_forward_proj == "gated-gelu":
lowercase__ = 'gelu_new'
@property
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
return self.d_model
@property
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
return self.num_heads
@property
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return self.num_layers
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def UpperCAmelCase ( self : Optional[int]) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
lowercase__ = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
lowercase__ = 'past_encoder_sequence + sequence'
lowercase__ = {0: 'batch'}
lowercase__ = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
lowercase__ = {0: 'batch', 1: 'decoder_sequence'}
lowercase__ = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs')
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
return 13
@property
def UpperCAmelCase ( self : Optional[Any]) -> float:
"""simple docstring"""
return 5E-4
| 642 | 1 |
"""simple docstring"""
def lowerCamelCase_ ( __lowerCAmelCase=2_8123 ) -> int:
'''simple docstring'''
lowerCamelCase__ =[1] * (limit + 1)
for i in range(2 , int(limit**0.5 ) + 1 ):
sum_divs[i * i] += i
for k in range(i + 1 , limit // i + 1 ):
sum_divs[k * i] += k + i
lowerCamelCase__ =set()
lowerCamelCase__ =0
for n in range(1 , limit + 1 ):
if sum_divs[n] > n:
abundants.add(__lowerCAmelCase )
if not any((n - a in abundants) for a in abundants ):
res += n
return res
if __name__ == "__main__":
print(solution())
| 530 | """simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = False ) -> list[float]:
'''simple docstring'''
if radian_mode:
return [magnitude * cos(__lowerCAmelCase ), magnitude * sin(__lowerCAmelCase )]
return [magnitude * cos(radians(__lowerCAmelCase ) ), magnitude * sin(radians(__lowerCAmelCase ) )]
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 10**-1 ) -> bool:
'''simple docstring'''
lowerCamelCase__ =cross(__lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ =sum(__lowerCAmelCase )
return abs(__lowerCAmelCase ) < eps
if __name__ == "__main__":
# Test to check if it works
a =array(
[
polar_force(718.4, 180 - 30),
polar_force(879.54, 45),
polar_force(100, -90),
]
)
a =array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
a =array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
a =array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
a =array([[0, -2000], [0, -1200], [0, 1_5600], [0, -1_2400]])
a =array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 530 | 1 |
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
def lowerCamelCase__ ( A : Any , A : Dict , A : str , A : Optional[Any]=False ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
if not is_sharded:
UpperCAmelCase = os.path.abspath(A )
logger.info(f"""Loading PyTorch weights from {pt_path}""" )
UpperCAmelCase = torch.load(A , map_location='''cpu''' )
logger.info(f"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )
UpperCAmelCase = convert_pytorch_state_dict_to_flax(A , A )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
UpperCAmelCase = convert_pytorch_sharded_state_dict_to_flax(A , A )
return flax_state_dict
def lowerCamelCase__ ( A : Tuple[str] , A : np.ndarray , A : Dict[str, jnp.ndarray] , A : str , ):
'''simple docstring'''
def is_key_or_prefix_key_in_dict(A : Tuple[str] ) -> bool:
return len(set(A ) & {key, (model_prefix,) + key} ) > 0
# layer norm
UpperCAmelCase = pt_tuple_key[:-1] + ('''scale''',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(A ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
UpperCAmelCase = pt_tuple_key[:-1] + ('''mean''',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(A ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
UpperCAmelCase = pt_tuple_key[:-1] + ('''var''',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(A ):
return renamed_pt_tuple_key, pt_tensor
# embedding
UpperCAmelCase = pt_tuple_key[:-1] + ('''embedding''',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(A ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCAmelCase = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(A ):
UpperCAmelCase = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCAmelCase = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(A ):
UpperCAmelCase = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCAmelCase = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCAmelCase = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
UpperCAmelCase = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
UpperCAmelCase = pt_tuple_key[-2] + '''_g'''
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
UpperCAmelCase = pt_tuple_key[-2] + '''_v'''
if name is not None:
UpperCAmelCase = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def lowerCamelCase__ ( A : int , A : int ):
'''simple docstring'''
UpperCAmelCase = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCAmelCase = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
UpperCAmelCase = flax_model.params['''params''']
else:
UpperCAmelCase = flax_model.params
UpperCAmelCase = flatten_dict(A )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCAmelCase = flatten_dict(flax_model.params['''batch_stats'''] )
random_flax_state_dict.update(A )
UpperCAmelCase = {}
UpperCAmelCase = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
UpperCAmelCase = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCAmelCase = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
UpperCAmelCase = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCAmelCase = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCAmelCase , UpperCAmelCase = rename_key_and_reshape_tensor(
A , A , A , A )
# add model prefix if necessary
UpperCAmelCase = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCAmelCase = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
UpperCAmelCase = jnp.asarray(A )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(A , A )
continue
# also add unexpected weight so that warning is thrown
UpperCAmelCase = jnp.asarray(A )
else:
# also add unexpected weight so that warning is thrown
UpperCAmelCase = jnp.asarray(A )
return unflatten_dict(A )
def lowerCamelCase__ ( A : Optional[int] , A : str ):
'''simple docstring'''
import torch
# Load the index
UpperCAmelCase = {}
for shard_file in shard_filenames:
# load using msgpack utils
UpperCAmelCase = torch.load(A )
UpperCAmelCase = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCAmelCase = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCAmelCase = flax_model.params['''params''']
UpperCAmelCase = flatten_dict(A )
random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
else:
UpperCAmelCase = flax_model.params
UpperCAmelCase = flatten_dict(A )
UpperCAmelCase = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
UpperCAmelCase = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCAmelCase = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
UpperCAmelCase = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCAmelCase = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCAmelCase , UpperCAmelCase = rename_key_and_reshape_tensor(
A , A , A , A )
# add model prefix if necessary
UpperCAmelCase = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCAmelCase = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
UpperCAmelCase = jnp.asarray(A )
continue
if "var" in flax_key[-1]:
UpperCAmelCase = jnp.asarray(A )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(A , A )
continue
# also add unexpected weight so that warning is thrown
UpperCAmelCase = jnp.asarray(A )
else:
# also add unexpected weight so that warning is thrown
UpperCAmelCase = jnp.asarray(A )
return unflatten_dict(A )
def lowerCamelCase__ ( A : List[str] , A : List[Any] ):
'''simple docstring'''
UpperCAmelCase = os.path.abspath(A )
logger.info(f"""Loading Flax weights from {flax_checkpoint_path}""" )
# import correct flax class
UpperCAmelCase = getattr(A , '''Flax''' + model.__class__.__name__ )
# load flax weight dict
with open(A , '''rb''' ) as state_f:
try:
UpperCAmelCase = from_bytes(A , state_f.read() )
except UnpicklingError:
raise EnvironmentError(f"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(A , A )
def lowerCamelCase__ ( A : Tuple , A : List[Any] ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
UpperCAmelCase = flatten_dict(jax.tree_util.tree_map(lambda A : x.dtype == jnp.bfloataa , A ) ).values()
if any(A ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
UpperCAmelCase = jax.tree_util.tree_map(
lambda A : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , A )
UpperCAmelCase = flatten_dict(A )
UpperCAmelCase = pt_model.state_dict()
UpperCAmelCase = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
UpperCAmelCase = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
UpperCAmelCase = []
UpperCAmelCase = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCAmelCase = flax_key_tuple[0] == pt_model.base_model_prefix
UpperCAmelCase = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCAmelCase = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
UpperCAmelCase = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(A ) not in pt_model_dict:
# conv layer
UpperCAmelCase = flax_key_tuple[:-1] + ('''weight''',)
UpperCAmelCase = jnp.transpose(A , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(A ) not in pt_model_dict:
# linear layer
UpperCAmelCase = flax_key_tuple[:-1] + ('''weight''',)
UpperCAmelCase = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCAmelCase = flax_key_tuple[:-1] + ('''weight''',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
UpperCAmelCase = flax_key_tuple[:-1] + ('''running_mean''',)
elif "var" in flax_key_tuple[-1]:
UpperCAmelCase = flax_key_tuple[:-1] + ('''running_var''',)
if "batch_stats" in flax_state:
UpperCAmelCase = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
UpperCAmelCase = '''.'''.join(A )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
UpperCAmelCase = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
UpperCAmelCase = key.split('''.''' )
UpperCAmelCase = None
if key_components[-3::2] == ["parametrizations", "original0"]:
UpperCAmelCase = key_components[-2] + '''_g'''
elif key_components[-3::2] == ["parametrizations", "original1"]:
UpperCAmelCase = key_components[-2] + '''_v'''
if name is not None:
UpperCAmelCase = key_components[:-3] + [name]
UpperCAmelCase = '''.'''.join(A )
UpperCAmelCase = key
if flax_key in special_pt_names:
UpperCAmelCase = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
UpperCAmelCase = np.asarray(A ) if not isinstance(A , np.ndarray ) else flax_tensor
UpperCAmelCase = torch.from_numpy(A )
# remove from missing keys
missing_keys.remove(A )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(A )
pt_model.load_state_dict(A )
# re-transform missing_keys to list
UpperCAmelCase = list(A )
if len(A ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
else:
logger.warning(f"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" )
if len(A ) > 0:
logger.warning(
f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
''' use it for predictions and inference.''' )
else:
logger.warning(
f"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"""
'''If your task is similar to the task the model of the checkpoint was trained on, '''
f"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" )
return pt_model
| 50 |
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : List[str] = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : List[str] = "encodec"
def __init__( self : List[str] , lowerCAmelCase : int=[1.5, 3.0, 6.0, 12.0, 24.0] , lowerCAmelCase : Tuple=24000 , lowerCAmelCase : List[Any]=1 , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : str=None , lowerCAmelCase : Dict=None , lowerCAmelCase : str=128 , lowerCAmelCase : Any=32 , lowerCAmelCase : Any=1 , lowerCAmelCase : List[Any]=[8, 5, 4, 2] , lowerCAmelCase : Union[str, Any]="weight_norm" , lowerCAmelCase : str=7 , lowerCAmelCase : Optional[int]=7 , lowerCAmelCase : Any=3 , lowerCAmelCase : Tuple=2 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : List[str]="reflect" , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Union[str, Any]=1.0 , lowerCAmelCase : Optional[Any]=1024 , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : str=True , **lowerCAmelCase : str , )-> List[str]:
"""simple docstring"""
UpperCAmelCase = target_bandwidths
UpperCAmelCase = sampling_rate
UpperCAmelCase = audio_channels
UpperCAmelCase = normalize
UpperCAmelCase = chunk_length_s
UpperCAmelCase = overlap
UpperCAmelCase = hidden_size
UpperCAmelCase = num_filters
UpperCAmelCase = num_residual_layers
UpperCAmelCase = upsampling_ratios
UpperCAmelCase = norm_type
UpperCAmelCase = kernel_size
UpperCAmelCase = last_kernel_size
UpperCAmelCase = residual_kernel_size
UpperCAmelCase = dilation_growth_rate
UpperCAmelCase = use_causal_conv
UpperCAmelCase = pad_mode
UpperCAmelCase = compress
UpperCAmelCase = num_lstm_layers
UpperCAmelCase = trim_right_ratio
UpperCAmelCase = codebook_size
UpperCAmelCase = codebook_dim if codebook_dim is not None else hidden_size
UpperCAmelCase = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**lowerCAmelCase )
@property
def a__( self : str )-> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def a__( self : List[str] )-> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def a__( self : List[Any] )-> int:
"""simple docstring"""
UpperCAmelCase = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def a__( self : int )-> int:
"""simple docstring"""
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 50 | 1 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case :
def __init__( self ,snake_case ,snake_case=3 ,snake_case=32 ,snake_case=3 ,snake_case=10 ,snake_case=[10, 20, 30, 40] ,snake_case=[1, 1, 2, 1] ,snake_case=True ,snake_case=True ,snake_case="relu" ,snake_case=3 ,snake_case=None ,):
'''simple docstring'''
lowercase : Tuple = parent
lowercase : int = batch_size
lowercase : str = image_size
lowercase : Tuple = num_channels
lowercase : List[Any] = embeddings_size
lowercase : List[Any] = hidden_sizes
lowercase : List[Any] = depths
lowercase : Optional[Any] = is_training
lowercase : Optional[Any] = use_labels
lowercase : Tuple = hidden_act
lowercase : Optional[int] = num_labels
lowercase : Optional[Any] = scope
lowercase : Dict = len(snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase : Tuple = None
if self.use_labels:
lowercase : Any = ids_tensor([self.batch_size] ,self.num_labels )
lowercase : List[str] = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : Optional[int] = TFResNetModel(config=snake_case )
lowercase : str = model(snake_case )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : Any = self.num_labels
lowercase : int = TFResNetForImageClassification(snake_case )
lowercase : List[Any] = model(snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[str] = self.prepare_config_and_inputs()
lowercase , lowercase , lowercase : int = config_and_inputs
lowercase : Union[str, Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __snake_case ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
_a : Optional[Any]= (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_a : Dict= (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_a : List[Any]= False
_a : List[str]= False
_a : Union[str, Any]= False
_a : int= False
_a : Optional[int]= False
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Union[str, Any] = TFResNetModelTester(self )
lowercase : Optional[int] = ConfigTester(self ,config_class=snake_case ,has_text_modality=snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase , lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Optional[Any] = model_class(snake_case )
lowercase : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase : Optional[int] = [*signature.parameters.keys()]
lowercase : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
def check_hidden_states_output(snake_case ,snake_case ,snake_case ):
lowercase : str = model_class(snake_case )
lowercase : Optional[Any] = model(**self._prepare_for_class(snake_case ,snake_case ) )
lowercase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase : List[Any] = self.model_tester.num_stages
self.assertEqual(len(snake_case ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
lowercase , lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase : Optional[int] = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase : Dict = layer_type
lowercase : int = True
check_hidden_states_output(snake_case ,snake_case ,snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase : Optional[Any] = True
check_hidden_states_output(snake_case ,snake_case ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Optional[Any] = TFResNetModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def _snake_case( ) -> int:
lowercase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __snake_case ( unittest.TestCase ):
@cached_property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowercase : List[Any] = self.default_image_processor
lowercase : int = prepare_img()
lowercase : Union[str, Any] = image_processor(images=snake_case ,return_tensors="""tf""" )
# forward pass
lowercase : str = model(**snake_case )
# verify the logits
lowercase : Dict = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,snake_case )
lowercase : List[Any] = tf.constant([-11.1_069, -9.7_877, -8.3_777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,snake_case ,atol=1e-4 ) )
| 336 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowercase : List[str] = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : List[str] = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Union[str, Any] = ["""CLIPFeatureExtractor"""]
lowercase : Tuple = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Union[str, Any] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Any = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : List[str] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
lowercase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 336 | 1 |
lowerCAmelCase : List[Any] = {
"""Pillow""": """Pillow""",
"""accelerate""": """accelerate>=0.11.0""",
"""compel""": """compel==0.1.8""",
"""black""": """black~=23.1""",
"""datasets""": """datasets""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.13.2""",
"""requests-mock""": """requests-mock==1.10.0""",
"""importlib_metadata""": """importlib_metadata""",
"""invisible-watermark""": """invisible-watermark""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2""",
"""jaxlib""": """jaxlib>=0.1.65""",
"""Jinja2""": """Jinja2""",
"""k-diffusion""": """k-diffusion>=0.0.12""",
"""torchsde""": """torchsde""",
"""note_seq""": """note_seq""",
"""librosa""": """librosa""",
"""numpy""": """numpy""",
"""omegaconf""": """omegaconf""",
"""parameterized""": """parameterized""",
"""protobuf""": """protobuf>=3.20.3,<4""",
"""pytest""": """pytest""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""ruff""": """ruff>=0.0.241""",
"""safetensors""": """safetensors""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""scipy""": """scipy""",
"""onnx""": """onnx""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""tensorboard""": """tensorboard""",
"""torch""": """torch>=1.4""",
"""torchvision""": """torchvision""",
"""transformers""": """transformers>=4.25.1""",
"""urllib3""": """urllib3<=2.0.0""",
}
| 720 |
def A_ ( a ):
"""simple docstring"""
if upper_limit < 0:
raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
SCREAMING_SNAKE_CASE_ : List[str] = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
SCREAMING_SNAKE_CASE_ : Any = 1
if upper_limit > 0:
SCREAMING_SNAKE_CASE_ : int = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(a ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
lowerCAmelCase : Union[str, Any] = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(F'The Catalan numbers from 0 through {N} are:')
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
| 353 | 0 |
"""simple docstring"""
import argparse
import json
import subprocess
def _SCREAMING_SNAKE_CASE ( UpperCamelCase : List[str] , UpperCamelCase : Optional[Any] ):
A__ = []
A__ = (
F"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""""
""" https://api.github.com/repos/huggingface/transformers/actions/runners"""
)
A__ = subprocess.run(UpperCamelCase , shell=UpperCamelCase , stdout=subprocess.PIPE )
A__ = output.stdout.decode("""utf-8""" )
A__ = json.loads(UpperCamelCase )
A__ = status["""runners"""]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(UpperCamelCase )
# save the result so we can report them on Slack
with open("""offline_runners.txt""" , """w""" ) as fp:
fp.write(json.dumps(UpperCamelCase ) )
if len(UpperCamelCase ) > 0:
A__ = """\n""".join([x["""name"""] for x in offline_runners] )
raise ValueError(F"""The following runners are offline:\n{failed}""" )
if __name__ == "__main__":
def _SCREAMING_SNAKE_CASE ( UpperCamelCase : Tuple ):
return values.split(""",""" )
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--target_runners",
default=None,
type=list_str,
required=True,
help="Comma-separated list of runners to check status.",
)
parser.add_argument(
"--token", default=None, type=str, required=True, help="A token that has actions:read permission."
)
lowerCamelCase__ = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 574 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {
"configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
"tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 574 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __A ( A_ ):
UpperCamelCase :Union[str, Any] = '''gpt_neo'''
UpperCamelCase :List[str] = ['''past_key_values''']
UpperCamelCase :List[Any] = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__(self , __magic_name__=50257 , __magic_name__=2048 , __magic_name__=2048 , __magic_name__=24 , __magic_name__=[[["global", "local"], 12]] , __magic_name__=16 , __magic_name__=None , __magic_name__=256 , __magic_name__="gelu_new" , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.1 , __magic_name__=1E-5 , __magic_name__=0.02 , __magic_name__=True , __magic_name__=50256 , __magic_name__=50256 , **__magic_name__ , ):
lowerCamelCase__ : Union[str, Any] = vocab_size
lowerCamelCase__ : Optional[int] = max_position_embeddings
lowerCamelCase__ : Optional[Any] = hidden_size
lowerCamelCase__ : Dict = num_layers
lowerCamelCase__ : Optional[Any] = num_heads
lowerCamelCase__ : int = intermediate_size
lowerCamelCase__ : Optional[Any] = window_size
lowerCamelCase__ : Union[str, Any] = activation_function
lowerCamelCase__ : Union[str, Any] = resid_dropout
lowerCamelCase__ : int = embed_dropout
lowerCamelCase__ : List[str] = attention_dropout
lowerCamelCase__ : Optional[Any] = classifier_dropout
lowerCamelCase__ : Any = layer_norm_epsilon
lowerCamelCase__ : List[str] = initializer_range
lowerCamelCase__ : Tuple = use_cache
lowerCamelCase__ : Optional[Any] = bos_token_id
lowerCamelCase__ : List[str] = eos_token_id
lowerCamelCase__ : Union[str, Any] = attention_types
lowerCamelCase__ : str = self.expand_attention_types_params(__magic_name__ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
f"but is `len(config.attention_layers) = {len(self.attention_layers )}`, "
f"`config.num_layers = {self.num_layers}`. "
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""" )
super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
@staticmethod
def _snake_case (__magic_name__ ):
lowerCamelCase__ : Tuple = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def _A (UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] ) ->str:
'''simple docstring'''
import torch
lowerCamelCase__ : Union[str, Any] = input.size()
lowerCamelCase__ : Any = len(UpperCamelCase )
lowerCamelCase__ : Optional[Any] = shape[dimension]
lowerCamelCase__ : Tuple = torch.arange(0 , UpperCamelCase , UpperCamelCase )
lowerCamelCase__ : Tuple = torch.div(sizedim - size , UpperCamelCase , rounding_mode="""floor""" ) + 1
lowerCamelCase__ : Optional[int] = torch.arange(UpperCamelCase ) + low_indices[:min_length][:, None]
lowerCamelCase__ : List[Any] = [slice(UpperCamelCase )] * rank
lowerCamelCase__ : Optional[Any] = indices
lowerCamelCase__ : Dict = input[s]
lowerCamelCase__ : List[Any] = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(UpperCamelCase )
def _A (UpperCamelCase : Dict , UpperCamelCase : Any ) ->Any:
'''simple docstring'''
import torch
lowerCamelCase__ : Any = torch.arange(1 , UpperCamelCase )
lowerCamelCase__ : Dict = torch.remainder(UpperCamelCase , UpperCamelCase )
lowerCamelCase__ : Dict = remainders == 0
lowerCamelCase__ : Any = candidates[divisor_indices]
lowerCamelCase__ : Union[str, Any] = torch.max(UpperCamelCase )
return largest_divisor, torch.div(UpperCamelCase , UpperCamelCase , rounding_mode="""floor""" )
class __A ( A_ ):
@property
def _snake_case (self ):
lowerCamelCase__ : List[Any] = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(__magic_name__ , direction="""inputs""" )
lowerCamelCase__ : Tuple = {0: """batch""", 1: """past_sequence + sequence"""}
else:
lowerCamelCase__ : Tuple = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def _snake_case (self ):
return self._config.num_heads
def _snake_case (self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , ):
lowerCamelCase__ : List[str] = super(__magic_name__ , self ).generate_dummy_inputs(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
# We need to order the input in the way they appears in the forward()
lowerCamelCase__ : Any = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCamelCase__ ,lowerCamelCase__ : Any = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowerCamelCase__ : Optional[Any] = seqlen + 2
lowerCamelCase__ : Optional[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCamelCase__ : Dict = [
(torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(self.num_layers )
]
lowerCamelCase__ : Optional[Any] = common_inputs["""attention_mask"""]
if self.use_past:
lowerCamelCase__ : List[Any] = ordered_inputs["""attention_mask"""].dtype
lowerCamelCase__ : int = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 )
return ordered_inputs
@property
def _snake_case (self ):
return 13
| 96 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowercase = logging.get_logger(__name__)
class __A ( A_ ):
UpperCamelCase :Union[str, Any] = ['''pixel_values''']
def __init__(self , __magic_name__ = True , __magic_name__ = None , __magic_name__ = PIL.Image.BICUBIC , __magic_name__ = True , __magic_name__ = None , __magic_name__ = 1 / 255 , __magic_name__ = True , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase__ : str = size if size is not None else {"""height""": 256, """width""": 256}
lowerCamelCase__ : Optional[int] = get_size_dict(__magic_name__ )
lowerCamelCase__ : Any = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
lowerCamelCase__ : Optional[int] = get_size_dict(__magic_name__ , param_name="""crop_size""" )
lowerCamelCase__ : str = do_resize
lowerCamelCase__ : Any = size
lowerCamelCase__ : Tuple = resample
lowerCamelCase__ : Union[str, Any] = do_center_crop
lowerCamelCase__ : List[str] = crop_size
lowerCamelCase__ : int = do_rescale
lowerCamelCase__ : Any = rescale_factor
lowerCamelCase__ : Dict = do_normalize
lowerCamelCase__ : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCamelCase__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _snake_case (self , __magic_name__ , __magic_name__ , __magic_name__ = PIL.Image.BICUBIC , __magic_name__ = None , **__magic_name__ , ):
lowerCamelCase__ : Optional[int] = get_size_dict(__magic_name__ )
if "height" not in size or "width" not in size:
raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}" )
return resize(
__magic_name__ , size=(size["""height"""], size["""width"""]) , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def _snake_case (self , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
lowerCamelCase__ : Tuple = get_size_dict(__magic_name__ )
if "height" not in size or "width" not in size:
raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}" )
return center_crop(__magic_name__ , size=(size["""height"""], size["""width"""]) , data_format=__magic_name__ , **__magic_name__ )
def _snake_case (self , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def _snake_case (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def _snake_case (self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__=None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = ChannelDimension.FIRST , **__magic_name__ , ):
lowerCamelCase__ : Tuple = do_resize if do_resize is not None else self.do_resize
lowerCamelCase__ : str = resample if resample is not None else self.resample
lowerCamelCase__ : str = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase__ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase__ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase__ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase__ : Tuple = image_mean if image_mean is not None else self.image_mean
lowerCamelCase__ : List[Any] = image_std if image_std is not None else self.image_std
lowerCamelCase__ : str = size if size is not None else self.size
lowerCamelCase__ : List[str] = get_size_dict(__magic_name__ )
lowerCamelCase__ : Tuple = crop_size if crop_size is not None else self.crop_size
lowerCamelCase__ : Optional[Any] = get_size_dict(__magic_name__ , param_name="""crop_size""" )
lowerCamelCase__ : Optional[int] = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowerCamelCase__ : Optional[int] = [to_numpy_array(__magic_name__ ) for image in images]
if do_resize:
lowerCamelCase__ : int = [self.resize(image=__magic_name__ , size=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_center_crop:
lowerCamelCase__ : List[Any] = [self.center_crop(image=__magic_name__ , size=__magic_name__ ) for image in images]
if do_rescale:
lowerCamelCase__ : Dict = [self.rescale(image=__magic_name__ , scale=__magic_name__ ) for image in images]
if do_normalize:
lowerCamelCase__ : Optional[Any] = [self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) for image in images]
lowerCamelCase__ : List[str] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
lowerCamelCase__ : Dict = {"""pixel_values""": images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
| 96 | 1 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCAmelCase__ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowercase_ = StableUnCLIPPipeline
lowercase_ = TEXT_TO_IMAGE_PARAMS
lowercase_ = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowercase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
lowercase_ = False
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =3_2
__A =embedder_hidden_size
# prior components
torch.manual_seed(0 )
__A =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__A =CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_a , projection_dim=_a , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
torch.manual_seed(0 )
__A =PriorTransformer(
num_attention_heads=2 , attention_head_dim=1_2 , embedding_dim=_a , num_layers=1 , )
torch.manual_seed(0 )
__A =DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1_0_0_0 , clip_sample=_a , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
__A =StableUnCLIPImageNormalizer(embedding_dim=_a )
__A =DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
__A =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__A =CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_a , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
torch.manual_seed(0 )
__A =UNetaDConditionModel(
sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_a , layers_per_block=1 , upcast_attention=_a , use_linear_projection=_a , )
torch.manual_seed(0 )
__A =DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_0085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_a , steps_offset=1 , )
torch.manual_seed(0 )
__A =AutoencoderKL()
__A ={
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def __UpperCamelCase ( self , lowercase__ , lowercase__=0 ):
'''simple docstring'''
if str(_a ).startswith('''mps''' ):
__A =torch.manual_seed(_a )
else:
__A =torch.Generator(device=_a ).manual_seed(_a )
__A ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=_a )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_a )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
__A =StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__A =torch.Generator(device='''cpu''' ).manual_seed(0 )
__A =pipe('''anime turle''' , generator=_a , output_type='''np''' )
__A =output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_a , _a )
def __UpperCamelCase ( self ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__A =StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
__A =pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__A =pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
__A =torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 1_0**9
| 184 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = '''ZinengTang/tvlt-base'''
snake_case__ = tempfile.mkdtemp()
def SCREAMING_SNAKE_CASE__ ( self:Dict , **_a:List[Any] ):
return TvltImageProcessor.from_pretrained(self.checkpoint , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , **_a:Tuple ):
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
processor.save_pretrained(self.tmpdirname )
snake_case__ = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , _a )
self.assertIsInstance(processor.image_processor , _a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
snake_case__ = np.ones([1_20_00] )
snake_case__ = feature_extractor(_a , return_tensors='''np''' )
snake_case__ = processor(audio=_a , return_tensors='''np''' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
snake_case__ = np.ones([3, 2_24, 2_24] )
snake_case__ = image_processor(_a , return_tensors='''np''' )
snake_case__ = processor(images=_a , return_tensors='''np''' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
snake_case__ = np.ones([1_20_00] )
snake_case__ = np.ones([3, 2_24, 2_24] )
snake_case__ = processor(audio=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 33 | 0 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase__ ( UpperCamelCase , unittest.TestCase ):
__A : List[Any] = CLIPTokenizer
__A : List[str] = CLIPTokenizerFast
__A : str = True
__A : Optional[Any] = {}
__A : List[str] = False
def _lowercase ( self : List[str]):
super().setUp()
# fmt: off
A__ : Dict = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
A__ : Any = dict(zip(_A , range(len(_A))))
A__ : List[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
A__ : Any = {"unk_token": "<unk>"}
A__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
A__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as fp:
fp.write(json.dumps(_A) + "\n")
with open(self.merges_file , "w" , encoding="utf-8") as fp:
fp.write("\n".join(_A))
def _lowercase ( self : Optional[int] , **_A : Dict):
kwargs.update(self.special_tokens_map)
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_A)
def _lowercase ( self : Any , **_A : List[Any]):
kwargs.update(self.special_tokens_map)
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_A)
def _lowercase ( self : str , _A : List[str]):
A__ : Any = "lower newer"
A__ : Dict = "lower newer"
return input_text, output_text
def _lowercase ( self : List[Any]):
A__ : Any = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
A__ : Optional[Any] = "lower newer"
A__ : int = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
A__ : Any = tokenizer.tokenize(_A)
self.assertListEqual(_A , _A)
A__ : List[str] = tokens + [tokenizer.unk_token]
A__ : Any = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A) , _A)
@require_ftfy
def _lowercase ( self : Optional[int]):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
A__ : Optional[Any] = self.tokenizer_class.from_pretrained(_A , **_A)
A__ : str = self.rust_tokenizer_class.from_pretrained(_A , **_A)
A__ : List[str] = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
A__ : Optional[Any] = tokenizer_s.tokenize(_A)
A__ : List[Any] = tokenizer_r.tokenize(_A)
self.assertListEqual(_A , _A)
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
A__ : Optional[int] = "xa\u0303y" + " " + "x\xe3y"
A__ : Dict = tokenizer_s.tokenize(_A)
A__ : Optional[Any] = tokenizer_r.tokenize(_A)
self.assertListEqual(_A , _A)
# Test that the tokenization is identical on unicode of space type
A__ : Any = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
A__ : Union[str, Any] = tokenizer_s.tokenize(_A)
A__ : Optional[Any] = tokenizer_r.tokenize(_A)
self.assertListEqual(_A , _A)
# Test that the tokenization is identical on unicode of line break type
A__ : Optional[int] = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
A__ : Optional[int] = tokenizer_s.tokenize(_A)
A__ : Union[str, Any] = tokenizer_r.tokenize(_A)
self.assertListEqual(_A , _A)
def _lowercase ( self : Tuple):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
A__ : List[Any] = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
A__ : int = F'{text_of_1_token} {text_of_1_token}'
A__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
_A , use_fast=_A , )
A__ : Any = tokenizer_r(_A , return_offsets_mapping=_A , add_special_tokens=_A)
self.assertEqual(encoding.offset_mapping[0] , (0, len(_A)))
self.assertEqual(
encoding.offset_mapping[1] , (len(_A) + 1, len(_A) + 1 + len(_A)) , )
A__ : Optional[int] = F' {text}'
A__ : List[str] = self.rust_tokenizer_class.from_pretrained(
_A , use_fast=_A , )
A__ : Union[str, Any] = tokenizer_r(_A , return_offsets_mapping=_A , add_special_tokens=_A)
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_A)))
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_A) + 1, 1 + len(_A) + 1 + len(_A)) , )
def _lowercase ( self : List[Any]):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(_A) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format."))
@require_ftfy
def _lowercase ( self : int):
super().test_tokenization_python_rust_equals()
def _lowercase ( self : Optional[Any]):
# CLIP always lower cases letters
pass | 182 |
snake_case : Tuple = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def snake_case__ ( ) -> None:
"""simple docstring"""
A__ : Union[str, Any] = input("Enter message: " )
A__ : Tuple = input("Enter key [alphanumeric]: " )
A__ : Optional[Any] = input("Encrypt/Decrypt [e/d]: " )
if mode.lower().startswith("e" ):
A__ : Tuple = "encrypt"
A__ : str = encrypt_message(__lowercase , __lowercase )
elif mode.lower().startswith("d" ):
A__ : Optional[Any] = "decrypt"
A__ : Union[str, Any] = decrypt_message(__lowercase , __lowercase )
print(F'\n{mode.title()}ed message:' )
print(__lowercase )
def snake_case__ ( __lowercase , __lowercase ) -> str:
"""simple docstring"""
return translate_message(__lowercase , __lowercase , "encrypt" )
def snake_case__ ( __lowercase , __lowercase ) -> str:
"""simple docstring"""
return translate_message(__lowercase , __lowercase , "decrypt" )
def snake_case__ ( __lowercase , __lowercase , __lowercase ) -> str:
"""simple docstring"""
A__ : Dict = []
A__ : Union[str, Any] = 0
A__ : List[Any] = key.upper()
for symbol in message:
A__ : List[str] = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(__lowercase )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(__lowercase ):
A__ : Optional[int] = 0
else:
translated.append(__lowercase )
return "".join(__lowercase )
if __name__ == "__main__":
main() | 182 | 1 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : Tuple = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
SCREAMING_SNAKE_CASE__ : Optional[int] = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Tuple = {
'''word_embeddings.weight''': '''word_embeddings.weight''',
'''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
'''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
'''weight''': '''ln_f.weight''',
'''bias''': '''ln_f.bias''',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
__magic_name__ :int = int(re.match(R'''.*layer_(\d*).*''', _lowerCamelCase )[1] )
layer_number -= 3
return f'''h.{layer_number}.''' + key
def __lowercase ( snake_case ):
"""simple docstring"""
if dtype == torch.bool:
return 1 / 8
__magic_name__ :List[Any] = re.search(R'''[^\d](\d+)$''', str(_lowerCamelCase ) )
if bit_search is None:
raise ValueError(f'''`dtype` is not a valid dtype: {dtype}.''' )
__magic_name__ :str = int(bit_search.groups()[0] )
return bit_size // 8
def __lowercase ( snake_case, snake_case, snake_case, snake_case, snake_case ):
"""simple docstring"""
if bloom_config_file == "":
__magic_name__ :Optional[Any] = BloomConfig()
else:
__magic_name__ :List[str] = BloomConfig.from_json_file(_lowerCamelCase )
if shard_model:
__magic_name__ :Tuple = os.listdir(_lowerCamelCase )
__magic_name__ :Any = sorted(filter(lambda snake_case : s.startswith('''layer''' ) and "model_00" in s, _lowerCamelCase ) )
__magic_name__ :Optional[Any] = {'''weight_map''': {}, '''metadata''': {}}
__magic_name__ :Dict = 0
__magic_name__ :Optional[int] = None
__magic_name__ :Dict = BloomConfig()
for j, file in enumerate(_lowerCamelCase ):
print('''Processing file: {}'''.format(_lowerCamelCase ) )
__magic_name__ :Optional[Any] = None
for i in range(_lowerCamelCase ):
# load all TP files
__magic_name__ :Union[str, Any] = file.replace('''model_00''', f'''model_0{i}''' )
__magic_name__ :Any = torch.load(os.path.join(_lowerCamelCase, _lowerCamelCase ), map_location='''cpu''' )
# Rename keys in the transformers names
__magic_name__ :Optional[int] = list(temp.keys() )
for key in keys:
__magic_name__ :Optional[Any] = temp.pop(_lowerCamelCase )
if tensors is None:
__magic_name__ :List[Any] = temp
else:
for key in tensors.keys():
if any(key.endswith(_lowerCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
__magic_name__ :List[str] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
__magic_name__ :List[Any] = torch.cat([tensors[key], temp[key]], dim=_lowerCamelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_lowerCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
__magic_name__ :Dict = tensors[key] / pretraining_tp
torch.save(
_lowerCamelCase, os.path.join(
_lowerCamelCase, '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ), str(len(_lowerCamelCase ) ).zfill(5 ) ), ), )
for key in tensors.keys():
__magic_name__ :List[str] = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
__magic_name__ :List[str] = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ), str(len(_lowerCamelCase ) ).zfill(5 ) )
__magic_name__ :Tuple = BloomConfig()
__magic_name__ :Union[str, Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
__magic_name__ :Tuple = total_size
with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(_lowerCamelCase, WEIGHTS_NAME + '''.index.json''' ), '''w''', encoding='''utf-8''' ) as f:
__magic_name__ :Optional[int] = json.dumps(_lowerCamelCase, indent=2, sort_keys=_lowerCamelCase ) + '''\n'''
f.write(_lowerCamelCase )
else:
__magic_name__ :str = BloomModel(_lowerCamelCase )
__magic_name__ :Dict = os.listdir(_lowerCamelCase )
__magic_name__ :Optional[int] = sorted(filter(lambda snake_case : s.startswith('''layer''' ) and "model_00" in s, _lowerCamelCase ) )
__magic_name__ :Union[str, Any] = None
for i, file in enumerate(_lowerCamelCase ):
__magic_name__ :Any = None
for i in range(_lowerCamelCase ):
# load all TP files
__magic_name__ :Optional[Any] = file.replace('''model_00''', f'''model_0{i}''' )
__magic_name__ :Dict = torch.load(os.path.join(_lowerCamelCase, _lowerCamelCase ), map_location='''cpu''' )
# Rename keys in the transformers names
__magic_name__ :List[str] = list(temp.keys() )
for key in keys:
__magic_name__ :str = temp.pop(_lowerCamelCase )
if tensors is None:
__magic_name__ :Any = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(_lowerCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
__magic_name__ :str = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
__magic_name__ :int = torch.cat([tensors[key], temp[key]], dim=_lowerCamelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_lowerCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
__magic_name__ :Any = tensors[key] / pretraining_tp
__magic_name__ :Any = model.load_state_dict(_lowerCamelCase, strict=_lowerCamelCase )
assert not other_keys.unexpected_keys, f'''The keys {other_keys.unexpected_keys} are unexpected'''
if missing_keys is None:
__magic_name__ :List[str] = set(other_keys.missing_keys )
else:
__magic_name__ :str = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f'''The keys {missing_keys} are missing'''
# Save pytorch-model
os.makedirs(_lowerCamelCase, exist_ok=_lowerCamelCase )
__magic_name__ :Optional[Any] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
__magic_name__ :Optional[int] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(f'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' )
if config.torch_dtype is not None:
__magic_name__ :Any = model.to(config.torch_dtype )
torch.save(model.state_dict(), _lowerCamelCase )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bloom_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path to the Megatron-LM checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--bloom_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--shard_model""",
action="""store_true""",
help="""An optional setting to shard the output model \nThis enables sharding the converted checkpoint""",
)
parser.add_argument(
"""--pretraining_tp""",
default=4,
type=int,
help="""Pretraining TP rank that has been used when training the model in Megatron-LM \n""",
)
SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 0 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
_SCREAMING_SNAKE_CASE : str = logging.getLogger(__name__)
class a ( __snake_case ):
SCREAMING_SNAKE_CASE : Optional[Any] = """summarization"""
SCREAMING_SNAKE_CASE : Any = ["""loss"""]
SCREAMING_SNAKE_CASE : Optional[int] = ROUGE_KEYS
SCREAMING_SNAKE_CASE : Optional[int] = """rouge2"""
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : List[str] ) -> Union[str, Any]:
if hparams.sortish_sampler and hparams.gpus > 1:
lowerCamelCase_ = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' )
if hparams.sortish_sampler:
raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' )
super().__init__(__SCREAMING_SNAKE_CASE , num_labels=__SCREAMING_SNAKE_CASE , mode=self.mode , **__SCREAMING_SNAKE_CASE )
use_task_specific_params(self.model , 'summarization' )
save_git_info(self.hparams.output_dir )
lowerCamelCase_ = Path(self.output_dir ) / 'metrics.json'
lowerCamelCase_ = Path(self.output_dir ) / 'hparams.pkl'
pickle_save(self.hparams , self.hparams_save_path )
lowerCamelCase_ = 0
lowerCamelCase_ = defaultdict(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self.config.model_type
lowerCamelCase_ = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
lowerCamelCase_ = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
lowerCamelCase_ = {
'train': self.hparams.n_train,
'val': self.hparams.n_val,
'test': self.hparams.n_test,
}
lowerCamelCase_ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
lowerCamelCase_ = {
'train': self.hparams.max_target_length,
'val': self.hparams.val_max_target_length,
'test': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
lowerCamelCase_ = get_git_info()['repo_sha']
lowerCamelCase_ = hparams.num_workers
lowerCamelCase_ = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , __SCREAMING_SNAKE_CASE ):
lowerCamelCase_ = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
lowerCamelCase_ = self.decoder_start_token_id
lowerCamelCase_ = (
SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) else LegacySeqaSeqDataset
)
lowerCamelCase_ = False
lowerCamelCase_ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
lowerCamelCase_ = self.hparams.eval_max_gen_length
else:
lowerCamelCase_ = self.model.config.max_length
lowerCamelCase_ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def UpperCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Dict[str, torch.Tensor] ) -> Dict[str, List[str]]:
lowerCamelCase_ = {
k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items()
}
save_json(__SCREAMING_SNAKE_CASE , Path(self.output_dir ) / 'text_batch.json' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' )
lowerCamelCase_ = True
return readable_batch
def UpperCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : int , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
return self.model(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[int] ) -> int:
lowerCamelCase_ = self.tokenizer.batch_decode(
__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE )
return lmap(str.strip , __SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : dict ) -> Tuple:
lowerCamelCase_ = self.tokenizer.pad_token_id
lowerCamelCase_ , lowerCamelCase_ = batch['input_ids'], batch['attention_mask']
lowerCamelCase_ = batch['labels']
if isinstance(self.model , __SCREAMING_SNAKE_CASE ):
lowerCamelCase_ = self.model._shift_right(__SCREAMING_SNAKE_CASE )
else:
lowerCamelCase_ = shift_tokens_right(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
lowerCamelCase_ = decoder_input_ids
self.save_readable_batch(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , decoder_input_ids=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = outputs['logits']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
lowerCamelCase_ = nn.CrossEntropyLoss(ignore_index=__SCREAMING_SNAKE_CASE )
assert lm_logits.shape[-1] == self.vocab_size
lowerCamelCase_ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
lowerCamelCase_ = nn.functional.log_softmax(__SCREAMING_SNAKE_CASE , dim=-1 )
lowerCamelCase_ , lowerCamelCase_ = label_smoothed_nll_loss(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.hparams.label_smoothing , ignore_index=__SCREAMING_SNAKE_CASE )
return (loss,)
@property
def UpperCamelCase ( self : Optional[Any] ) -> int:
return self.tokenizer.pad_token_id
def UpperCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ) -> Dict:
lowerCamelCase_ = self._step(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = dict(zip(self.loss_names , __SCREAMING_SNAKE_CASE ) )
# tokens per batch
lowerCamelCase_ = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum()
lowerCamelCase_ = batch['input_ids'].shape[0]
lowerCamelCase_ = batch['input_ids'].eq(self.pad ).sum()
lowerCamelCase_ = batch['input_ids'].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def UpperCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int ) -> Dict:
return self._generative_step(__SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]="val" ) -> Dict:
self.step_count += 1
lowerCamelCase_ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
lowerCamelCase_ = losses['loss']
lowerCamelCase_ = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len']
}
lowerCamelCase_ = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
lowerCamelCase_ = torch.tensor(__SCREAMING_SNAKE_CASE ).type_as(__SCREAMING_SNAKE_CASE )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
lowerCamelCase_ = self.step_count
self.metrics[prefix].append(__SCREAMING_SNAKE_CASE ) # callback writes this to self.metrics_save_path
lowerCamelCase_ = flatten_list([x['preds'] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
def UpperCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] ) -> Dict:
return calculate_rouge(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : dict ) -> dict:
lowerCamelCase_ = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
lowerCamelCase_ = self.model.generate(
batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=__SCREAMING_SNAKE_CASE , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
lowerCamelCase_ = (time.time() - ta) / batch['input_ids'].shape[0]
lowerCamelCase_ = self.ids_to_clean_text(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self.ids_to_clean_text(batch['labels'] )
lowerCamelCase_ = self._step(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = dict(zip(self.loss_names , __SCREAMING_SNAKE_CASE ) )
lowerCamelCase_ = self.calc_generative_metrics(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCamelCase_ = np.mean(lmap(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
base_metrics.update(gen_time=__SCREAMING_SNAKE_CASE , gen_len=__SCREAMING_SNAKE_CASE , preds=__SCREAMING_SNAKE_CASE , target=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
return base_metrics
def UpperCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : int ) -> Any:
return self._generative_step(__SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Tuple:
return self.validation_epoch_end(__SCREAMING_SNAKE_CASE , prefix='test' )
def UpperCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[int] ) -> SeqaSeqDataset:
lowerCamelCase_ = self.n_obs[type_path]
lowerCamelCase_ = self.target_lens[type_path]
lowerCamelCase_ = self.dataset_class(
self.tokenizer , type_path=__SCREAMING_SNAKE_CASE , n_obs=__SCREAMING_SNAKE_CASE , max_target_length=__SCREAMING_SNAKE_CASE , **self.dataset_kwargs , )
return dataset
def UpperCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : bool = False ) -> DataLoader:
lowerCamelCase_ = self.get_dataset(__SCREAMING_SNAKE_CASE )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
lowerCamelCase_ = dataset.make_sortish_sampler(__SCREAMING_SNAKE_CASE , distributed=self.hparams.gpus > 1 )
return DataLoader(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , collate_fn=dataset.collate_fn , shuffle=__SCREAMING_SNAKE_CASE , num_workers=self.num_workers , sampler=__SCREAMING_SNAKE_CASE , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
lowerCamelCase_ = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
__SCREAMING_SNAKE_CASE , batch_sampler=__SCREAMING_SNAKE_CASE , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , collate_fn=dataset.collate_fn , shuffle=__SCREAMING_SNAKE_CASE , num_workers=self.num_workers , sampler=__SCREAMING_SNAKE_CASE , )
def UpperCamelCase ( self : Dict ) -> DataLoader:
lowerCamelCase_ = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=__SCREAMING_SNAKE_CASE )
return dataloader
def UpperCamelCase ( self : int ) -> DataLoader:
return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size )
def UpperCamelCase ( self : int ) -> DataLoader:
return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def UpperCamelCase ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Dict:
BaseTransformer.add_model_specific_args(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
add_generic_args(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
parser.add_argument(
'--max_source_length' , default=1024 , type=__SCREAMING_SNAKE_CASE , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--max_target_length' , default=56 , type=__SCREAMING_SNAKE_CASE , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--val_max_target_length' , default=142 , type=__SCREAMING_SNAKE_CASE , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--test_max_target_length' , default=142 , type=__SCREAMING_SNAKE_CASE , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument('--freeze_encoder' , action='store_true' )
parser.add_argument('--freeze_embeds' , action='store_true' )
parser.add_argument('--sortish_sampler' , action='store_true' , default=__SCREAMING_SNAKE_CASE )
parser.add_argument('--overwrite_output_dir' , action='store_true' , default=__SCREAMING_SNAKE_CASE )
parser.add_argument('--max_tokens_per_batch' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE )
parser.add_argument('--logger_name' , type=__SCREAMING_SNAKE_CASE , choices=['default', 'wandb', 'wandb_shared'] , default='default' )
parser.add_argument('--n_train' , type=__SCREAMING_SNAKE_CASE , default=-1 , required=__SCREAMING_SNAKE_CASE , help='# examples. -1 means use all.' )
parser.add_argument('--n_val' , type=__SCREAMING_SNAKE_CASE , default=500 , required=__SCREAMING_SNAKE_CASE , help='# examples. -1 means use all.' )
parser.add_argument('--n_test' , type=__SCREAMING_SNAKE_CASE , default=-1 , required=__SCREAMING_SNAKE_CASE , help='# examples. -1 means use all.' )
parser.add_argument(
'--task' , type=__SCREAMING_SNAKE_CASE , default='summarization' , required=__SCREAMING_SNAKE_CASE , help='# examples. -1 means use all.' )
parser.add_argument('--label_smoothing' , type=__SCREAMING_SNAKE_CASE , default=0.0 , required=__SCREAMING_SNAKE_CASE )
parser.add_argument('--src_lang' , type=__SCREAMING_SNAKE_CASE , default='' , required=__SCREAMING_SNAKE_CASE )
parser.add_argument('--tgt_lang' , type=__SCREAMING_SNAKE_CASE , default='' , required=__SCREAMING_SNAKE_CASE )
parser.add_argument('--eval_beams' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE )
parser.add_argument(
'--val_metric' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , choices=['bleu', 'rouge2', 'loss', None] )
parser.add_argument('--eval_max_gen_length' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help='never generate more than n tokens' )
parser.add_argument('--save_top_k' , type=__SCREAMING_SNAKE_CASE , default=1 , required=__SCREAMING_SNAKE_CASE , help='How many checkpoints to save' )
parser.add_argument(
'--early_stopping_patience' , type=__SCREAMING_SNAKE_CASE , default=-1 , required=__SCREAMING_SNAKE_CASE , help=(
'-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
' val_check_interval will effect it.'
) , )
return parser
class a ( __snake_case ):
SCREAMING_SNAKE_CASE : Union[str, Any] = """translation"""
SCREAMING_SNAKE_CASE : List[str] = ["""loss"""]
SCREAMING_SNAKE_CASE : str = ["""bleu"""]
SCREAMING_SNAKE_CASE : Optional[int] = """bleu"""
def __init__( self : str , __SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : Tuple ) -> Optional[Any]:
super().__init__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = hparams.src_lang
lowerCamelCase_ = hparams.tgt_lang
def UpperCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str] ) -> dict:
return calculate_bleu(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( _lowerCamelCase : str , _lowerCamelCase : Tuple=None ) -> SummarizationModule:
Path(args.output_dir ).mkdir(exist_ok=_lowerCamelCase )
check_output_dir(_lowerCamelCase , expected_items=3 )
if model is None:
if "summarization" in args.task:
lowerCamelCase_ = SummarizationModule(_lowerCamelCase )
else:
lowerCamelCase_ = TranslationModule(_lowerCamelCase )
lowerCamelCase_ = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('/tmp' )
or str(args.output_dir ).startswith('/var' )
):
lowerCamelCase_ = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
lowerCamelCase_ = os.environ.get('WANDB_PROJECT' , _lowerCamelCase )
lowerCamelCase_ = WandbLogger(name=model.output_dir.name , project=_lowerCamelCase )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
lowerCamelCase_ = WandbLogger(name=model.output_dir.name , project=F'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
lowerCamelCase_ = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
lowerCamelCase_ = False
lowerCamelCase_ = args.val_metric == 'loss'
lowerCamelCase_ = generic_train(
_lowerCamelCase , _lowerCamelCase , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , _lowerCamelCase ) , early_stopping_callback=_lowerCamelCase , logger=_lowerCamelCase , )
pickle_save(model.hparams , model.output_dir / 'hparams.pkl' )
if not args.do_predict:
return model
lowerCamelCase_ = ''
lowerCamelCase_ = sorted(glob.glob(os.path.join(args.output_dir , '*.ckpt' ) , recursive=_lowerCamelCase ) )
if checkpoints:
lowerCamelCase_ = checkpoints[-1]
lowerCamelCase_ = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE : Any = pl.Trainer.add_argparse_args(parser)
_SCREAMING_SNAKE_CASE : Any = SummarizationModule.add_model_specific_args(parser, os.getcwd())
_SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
main(args)
| 549 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''}
class __magic_name__ ( UpperCAmelCase__):
_UpperCAmelCase : int = 'openai-gpt'
_UpperCAmelCase : Any = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Dict ,__SCREAMING_SNAKE_CASE : Dict=4_0_4_7_8 ,__SCREAMING_SNAKE_CASE : Optional[int]=5_1_2 ,__SCREAMING_SNAKE_CASE : int=7_6_8 ,__SCREAMING_SNAKE_CASE : int=1_2 ,__SCREAMING_SNAKE_CASE : Union[str, Any]=1_2 ,__SCREAMING_SNAKE_CASE : List[str]="gelu" ,__SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 ,__SCREAMING_SNAKE_CASE : List[str]=0.1 ,__SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 ,__SCREAMING_SNAKE_CASE : str=1e-5 ,__SCREAMING_SNAKE_CASE : Optional[int]=0.02 ,__SCREAMING_SNAKE_CASE : Optional[Any]="cls_index" ,__SCREAMING_SNAKE_CASE : List[Any]=True ,__SCREAMING_SNAKE_CASE : List[str]=None ,__SCREAMING_SNAKE_CASE : Any=True ,__SCREAMING_SNAKE_CASE : str=0.1 ,**__SCREAMING_SNAKE_CASE : Dict ,):
UpperCAmelCase = vocab_size
UpperCAmelCase = n_positions
UpperCAmelCase = n_embd
UpperCAmelCase = n_layer
UpperCAmelCase = n_head
UpperCAmelCase = afn
UpperCAmelCase = resid_pdrop
UpperCAmelCase = embd_pdrop
UpperCAmelCase = attn_pdrop
UpperCAmelCase = layer_norm_epsilon
UpperCAmelCase = initializer_range
UpperCAmelCase = summary_type
UpperCAmelCase = summary_use_proj
UpperCAmelCase = summary_activation
UpperCAmelCase = summary_first_dropout
UpperCAmelCase = summary_proj_to_labels
super().__init__(**__lowerCAmelCase )
| 710 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
__lowerCAmelCase =None
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
__lowerCAmelCase ={
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
},
"tokenizer_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
},
}
__lowerCAmelCase ={
"albert-base-v1": 512,
"albert-large-v1": 512,
"albert-xlarge-v1": 512,
"albert-xxlarge-v1": 512,
"albert-base-v2": 512,
"albert-large-v2": 512,
"albert-xlarge-v2": 512,
"albert-xxlarge-v2": 512,
}
__lowerCAmelCase ="▁"
class __magic_name__ ( _a):
_UpperCAmelCase : List[str] = VOCAB_FILES_NAMES
_UpperCAmelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : int = AlbertTokenizer
def __init__( self : Optional[Any] ,__SCREAMING_SNAKE_CASE : Tuple=None ,__SCREAMING_SNAKE_CASE : int=None ,__SCREAMING_SNAKE_CASE : List[Any]=True ,__SCREAMING_SNAKE_CASE : str=True ,__SCREAMING_SNAKE_CASE : Optional[int]=False ,__SCREAMING_SNAKE_CASE : Union[str, Any]="[CLS]" ,__SCREAMING_SNAKE_CASE : Optional[int]="[SEP]" ,__SCREAMING_SNAKE_CASE : Any="<unk>" ,__SCREAMING_SNAKE_CASE : int="[SEP]" ,__SCREAMING_SNAKE_CASE : Union[str, Any]="<pad>" ,__SCREAMING_SNAKE_CASE : str="[CLS]" ,__SCREAMING_SNAKE_CASE : Tuple="[MASK]" ,**__SCREAMING_SNAKE_CASE : Union[str, Any] ,):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
UpperCAmelCase = (
AddedToken(__SCREAMING_SNAKE_CASE ,lstrip=__SCREAMING_SNAKE_CASE ,rstrip=__SCREAMING_SNAKE_CASE ,normalized=__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
else mask_token
)
super().__init__(
__SCREAMING_SNAKE_CASE ,tokenizer_file=__SCREAMING_SNAKE_CASE ,do_lower_case=__SCREAMING_SNAKE_CASE ,remove_space=__SCREAMING_SNAKE_CASE ,keep_accents=__SCREAMING_SNAKE_CASE ,bos_token=__SCREAMING_SNAKE_CASE ,eos_token=__SCREAMING_SNAKE_CASE ,unk_token=__SCREAMING_SNAKE_CASE ,sep_token=__SCREAMING_SNAKE_CASE ,pad_token=__SCREAMING_SNAKE_CASE ,cls_token=__SCREAMING_SNAKE_CASE ,mask_token=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ,)
UpperCAmelCase = do_lower_case
UpperCAmelCase = remove_space
UpperCAmelCase = keep_accents
UpperCAmelCase = vocab_file
UpperCAmelCase = False if not self.vocab_file else True
def _UpperCAmelCase ( self : Any ,__SCREAMING_SNAKE_CASE : List[int] ,__SCREAMING_SNAKE_CASE : Optional[List[int]] = None ):
UpperCAmelCase = [self.sep_token_id]
UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _UpperCAmelCase ( self : Dict ,__SCREAMING_SNAKE_CASE : List[int] ,__SCREAMING_SNAKE_CASE : Optional[List[int]] = None ):
UpperCAmelCase = [self.sep_token_id]
UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCAmelCase ( self : Optional[Any] ,__SCREAMING_SNAKE_CASE : str ,__SCREAMING_SNAKE_CASE : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase = os.path.join(
__SCREAMING_SNAKE_CASE ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file ,__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 405 | 0 |
import sys
def UpperCamelCase_( _A :Optional[Any] )-> Tuple:
UpperCamelCase__ = len(_A )
UpperCamelCase__ = [[0 for x in range(_A )] for x in range(_A )]
UpperCamelCase__ = [[0 for x in range(_A )] for x in range(_A )]
for chain_length in range(2 , _A ):
for a in range(1 , n - chain_length + 1 ):
UpperCamelCase__ = a + chain_length - 1
UpperCamelCase__ = sys.maxsize
for c in range(_A , _A ):
UpperCamelCase__ = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
UpperCamelCase__ = cost
UpperCamelCase__ = c
return matrix, sol
def UpperCamelCase_( _A :Tuple , _A :Union[str, Any] , _A :str )-> int:
if i == j:
print("A" + str(_A ) , end=" " )
else:
print("(" , end=" " )
print_optiomal_solution(_A , _A , optimal_solution[i][j] )
print_optiomal_solution(_A , optimal_solution[i][j] + 1 , _A )
print(")" , end=" " )
def UpperCamelCase_( )-> str:
UpperCamelCase__ = [30, 35, 15, 5, 10, 20, 25]
UpperCamelCase__ = len(_A )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
UpperCamelCase__, UpperCamelCase__ = matrix_chain_order(_A )
print("No. of Operation required: " + str(matrix[1][n - 1] ) )
print_optiomal_solution(_A , 1 , n - 1 )
if __name__ == "__main__":
main()
| 551 |
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 551 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
UpperCamelCase__ = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def UpperCAmelCase__ ( ) -> Optional[Any]:
__lowercase = _ask_options(
"""In which compute environment are you running?""" , ["""This machine""", """AWS (Amazon SageMaker)"""] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
__lowercase = get_sagemaker_input()
else:
__lowercase = get_cluster_input()
return config
def UpperCAmelCase__ ( lowercase__=None ) -> Dict:
if subparsers is not None:
__lowercase = subparsers.add_parser("""config""" , description=__UpperCamelCase )
else:
__lowercase = argparse.ArgumentParser("""Accelerate config command""" , description=__UpperCamelCase )
parser.add_argument(
"""--config_file""" , default=__UpperCamelCase , help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have """
"""such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed """
"""with \'huggingface\'."""
) , )
if subparsers is not None:
parser.set_defaults(func=__UpperCamelCase )
return parser
def UpperCAmelCase__ ( lowercase__ ) -> str:
__lowercase = get_user_input()
if args.config_file is not None:
__lowercase = args.config_file
else:
if not os.path.isdir(__UpperCamelCase ):
os.makedirs(__UpperCamelCase )
__lowercase = default_yaml_config_file
if config_file.endswith(""".json""" ):
config.to_json_file(__UpperCamelCase )
else:
config.to_yaml_file(__UpperCamelCase )
print(F"accelerate configuration saved at {config_file}" )
def UpperCAmelCase__ ( ) -> List[str]:
__lowercase = config_command_parser()
__lowercase = parser.parse_args()
config_command(__UpperCamelCase )
if __name__ == "__main__":
main()
| 708 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase__ = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 634 | 0 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def A ( snake_case__ : List[Any] ) -> Optional[int]:
'''simple docstring'''
def wrapper(*snake_case__ : Any , **snake_case__ : List[str] ):
__snake_case = timeit.default_timer()
__snake_case = func(*lowerCamelCase_ , **lowerCamelCase_ )
__snake_case = timeit.default_timer() - starttime
return delta
__snake_case = func.__name__
return wrapper
def A ( snake_case__ : dict , snake_case__ : Tuple=100 , snake_case__ : Tuple=None ) -> Tuple:
'''simple docstring'''
__snake_case = []
__snake_case = seq_shapes or {}
for i in range(lowerCamelCase_ ):
__snake_case = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(lowerCamelCase_ , _ArrayXD ):
__snake_case = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(lowerCamelCase_ , datasets.Value ):
if v.dtype == "string":
__snake_case = '''The small grey turtle was surprisingly fast when challenged.'''
else:
__snake_case = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(lowerCamelCase_ , datasets.Sequence ):
while isinstance(lowerCamelCase_ , datasets.Sequence ):
__snake_case = v.feature
__snake_case = seq_shapes[k]
__snake_case = np.random.rand(*lowerCamelCase_ ).astype(v.dtype )
__snake_case = data
dummy_data.append((i, example) )
return dummy_data
def A ( snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : Union[str, Any]=100 , snake_case__ : Any=None ) -> Dict:
'''simple docstring'''
__snake_case = generate_examples(lowerCamelCase_ , num_examples=lowerCamelCase_ , seq_shapes=lowerCamelCase_ )
with ArrowWriter(features=lowerCamelCase_ , path=lowerCamelCase_ ) as writer:
for key, record in dummy_data:
__snake_case = features.encode_example(lowerCamelCase_ )
writer.write(lowerCamelCase_ )
__snake_case = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
__snake_case = datasets.Dataset.from_file(filename=lowerCamelCase_ , info=datasets.DatasetInfo(features=lowerCamelCase_ ) )
return dataset
| 313 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class lowerCamelCase__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase):
'''simple docstring'''
snake_case_ =(
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
snake_case_ =(
{
"""feature-extraction""": TFMobileBertModel,
"""fill-mask""": TFMobileBertForMaskedLM,
"""question-answering""": TFMobileBertForQuestionAnswering,
"""text-classification""": TFMobileBertForSequenceClassification,
"""token-classification""": TFMobileBertForTokenClassification,
"""zero-shot""": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case_ =False
snake_case_ =False
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase=False ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = super()._prepare_for_class(__lowerCamelCase ,__lowerCamelCase ,return_labels=__lowerCamelCase )
if return_labels:
if model_class in get_values(__lowerCamelCase ):
lowerCAmelCase__ : int = tf.zeros(self.model_tester.batch_size ,dtype=tf.intaa )
return inputs_dict
class lowerCamelCase__ ( lowerCamelCase__):
'''simple docstring'''
def __init__(self ,__lowerCamelCase ,__lowerCamelCase=13 ,__lowerCamelCase=7 ,__lowerCamelCase=True ,__lowerCamelCase=True ,__lowerCamelCase=True ,__lowerCamelCase=True ,__lowerCamelCase=99 ,__lowerCamelCase=32 ,__lowerCamelCase=32 ,__lowerCamelCase=2 ,__lowerCamelCase=4 ,__lowerCamelCase=37 ,__lowerCamelCase="gelu" ,__lowerCamelCase=0.1 ,__lowerCamelCase=0.1 ,__lowerCamelCase=5_12 ,__lowerCamelCase=16 ,__lowerCamelCase=2 ,__lowerCamelCase=0.02 ,__lowerCamelCase=3 ,__lowerCamelCase=4 ,__lowerCamelCase=None ,) -> Any:
"""simple docstring"""
lowerCAmelCase__ : int = parent
lowerCAmelCase__ : Dict = batch_size
lowerCAmelCase__ : str = seq_length
lowerCAmelCase__ : Optional[int] = is_training
lowerCAmelCase__ : List[str] = use_input_mask
lowerCAmelCase__ : Tuple = use_token_type_ids
lowerCAmelCase__ : Any = use_labels
lowerCAmelCase__ : Tuple = vocab_size
lowerCAmelCase__ : Dict = hidden_size
lowerCAmelCase__ : Optional[int] = num_hidden_layers
lowerCAmelCase__ : Any = num_attention_heads
lowerCAmelCase__ : Tuple = intermediate_size
lowerCAmelCase__ : List[Any] = hidden_act
lowerCAmelCase__ : Optional[int] = hidden_dropout_prob
lowerCAmelCase__ : Dict = attention_probs_dropout_prob
lowerCAmelCase__ : Union[str, Any] = max_position_embeddings
lowerCAmelCase__ : Tuple = type_vocab_size
lowerCAmelCase__ : List[Any] = type_sequence_label_size
lowerCAmelCase__ : Optional[int] = initializer_range
lowerCAmelCase__ : Tuple = num_labels
lowerCAmelCase__ : Union[str, Any] = num_choices
lowerCAmelCase__ : str = scope
lowerCAmelCase__ : Optional[int] = embedding_size
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowerCAmelCase__ : Optional[Any] = None
if self.use_input_mask:
lowerCAmelCase__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : Optional[Any] = None
if self.use_token_type_ids:
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
lowerCAmelCase__ : int = None
lowerCAmelCase__ : Optional[Any] = None
lowerCAmelCase__ : Optional[Any] = None
if self.use_labels:
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size] ,self.num_choices )
lowerCAmelCase__ : Optional[int] = MobileBertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,embedding_size=self.embedding_size ,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = TFMobileBertModel(config=__lowerCamelCase )
lowerCAmelCase__ : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCAmelCase__ : int = model(__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = [input_ids, input_mask]
lowerCAmelCase__ : Union[str, Any] = model(__lowerCamelCase )
lowerCAmelCase__ : Optional[Any] = model(__lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = TFMobileBertForMaskedLM(config=__lowerCamelCase )
lowerCAmelCase__ : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCAmelCase__ : str = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : str = TFMobileBertForNextSentencePrediction(config=__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCAmelCase__ : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = TFMobileBertForPreTraining(config=__lowerCamelCase )
lowerCAmelCase__ : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCAmelCase__ : List[str] = model(__lowerCamelCase )
self.parent.assertEqual(
result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.num_labels
lowerCAmelCase__ : Optional[Any] = TFMobileBertForSequenceClassification(config=__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCAmelCase__ : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> int:
"""simple docstring"""
lowerCAmelCase__ : Dict = self.num_choices
lowerCAmelCase__ : Any = TFMobileBertForMultipleChoice(config=__lowerCamelCase )
lowerCAmelCase__ : Dict = tf.tile(tf.expand_dims(__lowerCamelCase ,1 ) ,(1, self.num_choices, 1) )
lowerCAmelCase__ : Any = tf.tile(tf.expand_dims(__lowerCamelCase ,1 ) ,(1, self.num_choices, 1) )
lowerCAmelCase__ : Tuple = tf.tile(tf.expand_dims(__lowerCamelCase ,1 ) ,(1, self.num_choices, 1) )
lowerCAmelCase__ : int = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowerCAmelCase__ : Tuple = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> int:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = self.num_labels
lowerCAmelCase__ : Union[str, Any] = TFMobileBertForTokenClassification(config=__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCAmelCase__ : int = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Tuple = TFMobileBertForQuestionAnswering(config=__lowerCamelCase )
lowerCAmelCase__ : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCAmelCase__ : Any = model(__lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : int = self.prepare_config_and_inputs()
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) : Union[str, Any] = config_and_inputs
lowerCAmelCase__ : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : int = TFMobileBertModelTest.TFMobileBertModelTester(self )
lowerCAmelCase__ : str = ConfigTester(self ,config_class=__lowerCamelCase ,hidden_size=37 )
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase__ (self ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__lowerCamelCase )
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCamelCase )
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCamelCase )
def lowerCAmelCase__ (self ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCamelCase )
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCamelCase )
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCamelCase )
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCamelCase )
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCamelCase )
@slow
def lowerCAmelCase__ (self ) -> Optional[int]:
"""simple docstring"""
for model_name in ["google/mobilebert-uncased"]:
lowerCAmelCase__ : List[Any] = TFMobileBertModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@require_tf
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
@slow
def lowerCAmelCase__ (self ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : int = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
lowerCAmelCase__ : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase__ : Dict = model(__lowerCamelCase )[0]
lowerCAmelCase__ : Optional[int] = [1, 6, 3_05_22]
self.assertEqual(output.shape ,__lowerCamelCase )
lowerCAmelCase__ : List[Any] = tf.constant(
[
[
[-4.591_9547, -9.24_8295, -9.64_5256],
[-6.730_6175, -6.44_0284, -6.605_2837],
[-7.274_3506, -6.784_7915, -6.02_4673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] ,__lowerCamelCase ,atol=1e-4 )
| 647 | 0 |
'''simple docstring'''
def snake_case__ ( _A: int = 1000 ) -> int:
'''simple docstring'''
return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution()) | 713 | '''simple docstring'''
from math import ceil, sqrt
def snake_case__ ( _A: int = 1000000 ) -> int:
'''simple docstring'''
lowerCAmelCase = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
lowerCAmelCase = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
lowerCAmelCase = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f'{solution() = }')
| 605 | 0 |
import numpy as np
from transformers import Pipeline
def _SCREAMING_SNAKE_CASE ( __snake_case ) -> Union[str, Any]:
_UpperCAmelCase = np.max(__snake_case , axis=-1 , keepdims=__snake_case )
_UpperCAmelCase = np.exp(outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=__snake_case )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def lowerCamelCase ( self : List[Any] , **lowerCamelCase : Any ) -> Any:
"""simple docstring"""
_UpperCAmelCase = {}
if "second_text" in kwargs:
_UpperCAmelCase = kwargs["""second_text"""]
return preprocess_kwargs, {}, {}
def lowerCamelCase ( self : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : Any=None ) -> Dict:
"""simple docstring"""
return self.tokenizer(lowerCamelCase , text_pair=lowerCamelCase , return_tensors=self.framework )
def lowerCamelCase ( self : Union[str, Any] , lowerCamelCase : Any ) -> Dict:
"""simple docstring"""
return self.model(**lowerCamelCase )
def lowerCamelCase ( self : List[str] , lowerCamelCase : List[Any] ) -> int:
"""simple docstring"""
_UpperCAmelCase = model_outputs.logits[0].numpy()
_UpperCAmelCase = softmax(lowerCamelCase )
_UpperCAmelCase = np.argmax(lowerCamelCase )
_UpperCAmelCase = self.model.config.idalabel[best_class]
_UpperCAmelCase = probabilities[best_class].item()
_UpperCAmelCase = logits.tolist()
return {"label": label, "score": score, "logits": logits} | 108 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_lowerCamelCase : int = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Dict = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
_lowerCamelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 686 | 0 |
def lowerCAmelCase_ ( __lowerCamelCase = 5_0_0_0_0_0_0_0 ):
__snake_case : str = set()
__snake_case : Optional[int] = int((limit - 2_4) ** (1 / 2) )
__snake_case : List[str] = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , a_ ) ) )
for primea in primes:
__snake_case : int = primea * primea
for primea in primes:
__snake_case : Tuple = primea * primea * primea
if square + cube >= limit - 1_6:
break
for primea in primes:
__snake_case : Optional[Any] = primea * primea * primea * primea
__snake_case : List[str] = square + cube + tetr
if total >= limit:
break
ret.add(a_ )
return len(a_ )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 713 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case : Optional[int] = "▁"
_snake_case : Tuple = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class a (_lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = BertGenerationTokenizer
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : List[Any] = True
def __snake_case ( self : Optional[int] ) -> Optional[int]:
super().setUp()
__snake_case : Tuple = BertGenerationTokenizer(lowerCamelCase , keep_accents=lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __snake_case ( self : Dict ) -> int:
__snake_case : str = "<s>"
__snake_case : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ) , lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ) , lowerCamelCase )
def __snake_case ( self : str ) -> Optional[Any]:
__snake_case : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(lowerCamelCase ) , 1002 )
def __snake_case ( self : List[str] ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def __snake_case ( self : List[str] ) -> Union[str, Any]:
__snake_case : List[Any] = BertGenerationTokenizer(lowerCamelCase , keep_accents=lowerCamelCase )
__snake_case : int = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [285, 46, 10, 170, 382] , )
__snake_case : str = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
__snake_case : Any = tokenizer.convert_tokens_to_ids(lowerCamelCase )
self.assertListEqual(
lowerCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__snake_case : Optional[Any] = tokenizer.convert_ids_to_tokens(lowerCamelCase )
self.assertListEqual(
lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def __snake_case ( self : str ) -> List[Any]:
return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
@slow
def __snake_case ( self : Tuple ) -> Union[str, Any]:
__snake_case : Union[str, Any] = "Hello World!"
__snake_case : List[str] = [18536, 2260, 101]
self.assertListEqual(lowerCamelCase , self.big_tokenizer.encode(lowerCamelCase ) )
@slow
def __snake_case ( self : str ) -> Optional[int]:
__snake_case : List[Any] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
__snake_case : Any = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
self.assertListEqual(lowerCamelCase , self.big_tokenizer.encode(lowerCamelCase ) )
@require_torch
@slow
def __snake_case ( self : Optional[Any] ) -> str:
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
__snake_case : List[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10]
__snake_case : Union[str, Any] = " ".join(lowerCamelCase )
__snake_case : Optional[int] = self.big_tokenizer.encode_plus(lowerCamelCase , return_tensors="pt" , return_token_type_ids=lowerCamelCase )
__snake_case : Optional[Any] = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=lowerCamelCase )
__snake_case : List[Any] = BertGenerationConfig()
__snake_case : Dict = BertGenerationEncoder(lowerCamelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowerCamelCase )
model(**lowerCamelCase )
@slow
def __snake_case ( self : List[Any] ) -> List[Any]:
# fmt: off
__snake_case : Optional[Any] = {"input_ids": [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
| 203 | 0 |
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 , dataset_name="my_dataset" )} ),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 )} ),
SplitDict({"train": SplitInfo()} ),
] , )
def lowerCAmelCase_ ( __a ) -> Tuple:
"""simple docstring"""
lowerCamelCase__: List[Any] =split_dict._to_yaml_list()
assert len(__a ) == len(__a )
lowerCamelCase__: List[str] =SplitDict._from_yaml_list(__a )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
lowerCamelCase__: Any =None
# the split name of split_dict takes over the name of the split info object
lowerCamelCase__: str =split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
"split_info" , [SplitInfo(), SplitInfo(dataset_name=__a ), SplitInfo(dataset_name="my_dataset" )] )
def lowerCAmelCase_ ( __a ) -> List[str]:
"""simple docstring"""
lowerCamelCase__: List[str] =asdict(SplitDict({"train": split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 59 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
lowercase_ = 'pytorch_model.bin'
lowercase_ = 'pytorch_model.bin.index.json'
lowercase_ = 'adapter_config.json'
lowercase_ = 'adapter_model.bin'
lowercase_ = 'adapter_model.safetensors'
lowercase_ = 'tf_model.h5'
lowercase_ = 'tf_model.h5.index.json'
lowercase_ = 'model.ckpt'
lowercase_ = 'flax_model.msgpack'
lowercase_ = 'flax_model.msgpack.index.json'
lowercase_ = 'model.safetensors'
lowercase_ = 'model.safetensors.index.json'
lowercase_ = 'config.json'
lowercase_ = 'preprocessor_config.json'
lowercase_ = FEATURE_EXTRACTOR_NAME
lowercase_ = 'generation_config.json'
lowercase_ = 'modelcard.json'
lowercase_ = '▁'
lowercase_ = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
lowercase_ = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
lowercase_ = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
lowercase_ = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def a ( A__ : List[str] ) -> str:
"""simple docstring"""
if version.parse(A__ ) < version.parse(A__ ):
if "dev" in min_version:
_lowercase =(
'This example requires a source install from HuggingFace Transformers (see '
'`https://huggingface.co/docs/transformers/installation#install-from-source`),'
)
else:
_lowercase =F'''This example requires a minimum version of {min_version},'''
error_message += F''' but the version found is {__version__}.\n'''
raise ImportError(
error_message
+ 'Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '
'versions of HuggingFace Transformers.' )
| 291 | 0 |
def snake_case__ ( lowerCamelCase_ ):
return 10 - x * x
def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ ):
# Bolzano theory in order to find if there is a root between a and b
if equation(lowerCamelCase_ ) * equation(lowerCamelCase_ ) >= 0:
raise ValueError('''Wrong space!''' )
A : Union[str, Any] = a
while (b - a) >= 0.01:
# Find middle point
A : Union[str, Any] = (a + b) / 2
# Check if middle point is root
if equation(lowerCamelCase_ ) == 0.0:
break
# Decide the side to repeat the steps
if equation(lowerCamelCase_ ) * equation(lowerCamelCase_ ) < 0:
A : str = c
else:
A : Dict = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 423 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = TextToVideoSDPipeline
UpperCAmelCase_ : str = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase_ : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
UpperCAmelCase_ : Dict = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def snake_case ( self ) -> int:
torch.manual_seed(0 )
A : Any = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=32 , attention_head_dim=4 , )
A : int = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , )
torch.manual_seed(0 )
A : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
A : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , )
A : int = CLIPTextModel(__UpperCAmelCase )
A : Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
A : Optional[Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=0 ) -> List[Any]:
if str(__UpperCAmelCase ).startswith('''mps''' ):
A : List[str] = torch.manual_seed(__UpperCAmelCase )
else:
A : Optional[Any] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
A : Optional[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''pt''',
}
return inputs
def snake_case ( self ) -> List[str]:
A : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
A : Dict = self.get_dummy_components()
A : Any = TextToVideoSDPipeline(**__UpperCAmelCase )
A : Union[str, Any] = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
A : Optional[int] = self.get_dummy_inputs(__UpperCAmelCase )
A : Optional[int] = '''np'''
A : Dict = sd_pipe(**__UpperCAmelCase ).frames
A : Tuple = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
A : Any = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case ( self ) -> Dict:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__UpperCAmelCase , expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def snake_case ( self ) -> str:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__UpperCAmelCase , expected_max_diff=1E-2 )
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def snake_case ( self ) -> Any:
pass
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def snake_case ( self ) -> Tuple:
pass
@unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
def snake_case ( self ) -> List[str]:
pass
def snake_case ( self ) -> Dict:
return super().test_progress_bar()
@slow
@skip_mps
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ) -> int:
A : List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' )
A : List[Any] = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
A : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
A : Optional[int] = pipe.to('''cuda''' )
A : List[Any] = '''Spiderman is surfing'''
A : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
A : List[Any] = pipe(__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=25 , output_type='''pt''' ).frames
A : Optional[int] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def snake_case ( self ) -> Union[str, Any]:
A : Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' )
A : Tuple = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
A : Any = pipe.to('''cuda''' )
A : int = '''Spiderman is surfing'''
A : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 )
A : List[Any] = pipe(__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type='''pt''' ).frames
A : List[str] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
| 423 | 1 |
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCAmelCase :
'''simple docstring'''
@property
def UpperCamelCase( self ) -> int:
'''simple docstring'''
return self.get_dummy_input()
@property
def UpperCamelCase( self ) -> int:
'''simple docstring'''
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , ) -> int:
'''simple docstring'''
lowerCamelCase_ = 4
lowerCamelCase_ = 32
lowerCamelCase_ = (32, 32)
lowerCamelCase_ = torch.manual_seed(0 )
lowerCamelCase_ = torch.device(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = (batch_size, num_channels) + sizes
lowerCamelCase_ = randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {'hidden_states': hidden_states}
if include_temb:
lowerCamelCase_ = 128
lowerCamelCase_ = randn_tensor((batch_size, temb_channels) , generator=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ )
if include_res_hidden_states_tuple:
lowerCamelCase_ = torch.manual_seed(1 )
lowerCamelCase_ = (randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ ),)
if include_encoder_hidden_states:
lowerCamelCase_ = floats_tensor((batch_size, 32, 32) ).to(SCREAMING_SNAKE_CASE_ )
if include_skip_sample:
lowerCamelCase_ = randn_tensor(((batch_size, 3) + sizes) , generator=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ )
return dummy_input
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
lowerCamelCase_ = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
lowerCamelCase_ = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.prepare_init_args_and_inputs_for_common()
lowerCamelCase_ = self.block_class(**SCREAMING_SNAKE_CASE_ )
unet_block.to(SCREAMING_SNAKE_CASE_ )
unet_block.eval()
with torch.no_grad():
lowerCamelCase_ = unet_block(**SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = output[0]
self.assertEqual(output.shape , self.output_shape )
lowerCamelCase_ = output[0, -1, -3:, -3:]
lowerCamelCase_ = torch.tensor(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(output_slice.flatten() , SCREAMING_SNAKE_CASE_ , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.prepare_init_args_and_inputs_for_common()
lowerCamelCase_ = self.block_class(**SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
lowerCamelCase_ = model(**SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = output[0]
lowerCamelCase_ = torch.device(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = randn_tensor(output.shape , device=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.nn.functional.mse_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
loss.backward()
| 42 |
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
A = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
A = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
A = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def UpperCAmelCase ( UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any]):
return float((preds == labels).mean())
def UpperCAmelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[Any]):
lowerCamelCase : Union[str, Any] = simple_accuracy(UpperCAmelCase__ , UpperCAmelCase__)
lowerCamelCase : List[Any] = float(fa_score(y_true=UpperCAmelCase__ , y_pred=UpperCAmelCase__))
return {
"accuracy": acc,
"f1": fa,
}
def UpperCAmelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict):
lowerCamelCase : Tuple = np.array(UpperCAmelCase__)
lowerCamelCase : str = np.array(UpperCAmelCase__)
lowerCamelCase : Union[str, Any] = en_sentvecs.shape[0]
# mean centering
lowerCamelCase : Optional[int] = en_sentvecs - np.mean(UpperCAmelCase__ , axis=0)
lowerCamelCase : Union[str, Any] = in_sentvecs - np.mean(UpperCAmelCase__ , axis=0)
lowerCamelCase : Any = cdist(UpperCAmelCase__ , UpperCAmelCase__ , 'cosine')
lowerCamelCase : Optional[Any] = np.array(range(UpperCAmelCase__))
lowerCamelCase : Optional[int] = sim.argsort(axis=1)[:, :10]
lowerCamelCase : Dict = np.any(preds == actual[:, None] , axis=1)
return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __snake_case ( datasets.Metric):
def UpperCAmelCase_ ( self ):
"""simple docstring"""
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]' )
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Value('int64' )
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32' ) ),
'references': datasets.Value('int64' )
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32' ) ),
} ), codebase_urls=[], reference_urls=[], format='numpy' if self.config_name != 'cvit-mkb-clsr' else None, )
def UpperCAmelCase_ ( self, A, A ):
"""simple docstring"""
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(A, A )}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(A, A )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(A, A )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]' )
| 320 | 0 |
from datetime import datetime
import requests
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: str ) -> bytes:
_UpperCAmelCase : Any = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
_UpperCAmelCase : Tuple = requests.get(base_url + url ).json()[0]["urls"][0]["src"]
return requests.get(lowerCAmelCase ).content
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = input('Enter Video/IGTV url: ').strip()
SCREAMING_SNAKE_CASE_ = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(F'''Done. Video saved to disk as {file_name}.''')
| 467 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class a ( UpperCAmelCase ):
_lowercase = "openai-gpt"
_lowercase = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , A_=40478 , A_=512 , A_=768 , A_=12 , A_=12 , A_="gelu" , A_=0.1 , A_=0.1 , A_=0.1 , A_=1e-5 , A_=0.02 , A_="cls_index" , A_=True , A_=None , A_=True , A_=0.1 , **A_ , ):
'''simple docstring'''
_UpperCAmelCase : Any = vocab_size
_UpperCAmelCase : str = n_positions
_UpperCAmelCase : List[Any] = n_embd
_UpperCAmelCase : Dict = n_layer
_UpperCAmelCase : Any = n_head
_UpperCAmelCase : int = afn
_UpperCAmelCase : int = resid_pdrop
_UpperCAmelCase : Tuple = embd_pdrop
_UpperCAmelCase : Optional[Any] = attn_pdrop
_UpperCAmelCase : Any = layer_norm_epsilon
_UpperCAmelCase : List[str] = initializer_range
_UpperCAmelCase : Union[str, Any] = summary_type
_UpperCAmelCase : List[Any] = summary_use_proj
_UpperCAmelCase : Optional[Any] = summary_activation
_UpperCAmelCase : int = summary_first_dropout
_UpperCAmelCase : List[str] = summary_proj_to_labels
super().__init__(**A_ )
| 467 | 1 |
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __snake_case :
@staticmethod
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase : List[str] , **__lowerCAmelCase : str ):
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
class __snake_case ( unittest.TestCase):
@require_torch
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : int = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
_lowerCamelCase : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
_lowerCamelCase : List[Any] = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(__lowerCAmelCase ) , [
[{'''score''': 0.3_33, '''label''': '''a'''}, {'''score''': 0.3_33, '''label''': '''b'''}, {'''score''': 0.3_33, '''label''': '''c'''}],
[{'''score''': 0.3_33, '''label''': '''a'''}, {'''score''': 0.3_33, '''label''': '''c'''}, {'''score''': 0.3_33, '''label''': '''b'''}],
] , )
_lowerCamelCase : Optional[int] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[
{'''score''': 0.3_33, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.3_33, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.3_33, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.3_33, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.3_33, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.3_33, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.3_33, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.3_33, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.3_33, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.3_33, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.3_33, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.3_33, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.3_33, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.3_33, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.3_33, '''label''': ANY(__lowerCAmelCase )},
],
] , )
@require_tf
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
_lowerCamelCase : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
_lowerCamelCase : List[str] = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [{'''score''': 0.3_33, '''label''': '''a'''}, {'''score''': 0.3_33, '''label''': '''b'''}, {'''score''': 0.3_33, '''label''': '''c'''}] , )
_lowerCamelCase : str = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[
{'''score''': 0.3_33, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.3_33, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.3_33, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.3_33, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.3_33, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.3_33, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.3_33, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.3_33, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.3_33, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.3_33, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.3_33, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.3_33, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.3_33, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.3_33, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.3_33, '''label''': ANY(__lowerCAmelCase )},
],
] , )
@slow
@require_torch
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
_lowerCamelCase : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
_lowerCamelCase : Union[str, Any] = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
{'''score''': 0.5_11, '''label''': '''remote'''},
{'''score''': 0.4_85, '''label''': '''cat'''},
{'''score''': 0.0_04, '''label''': '''plane'''},
] , )
_lowerCamelCase : Tuple = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[
{'''score''': 0.5_11, '''label''': '''remote'''},
{'''score''': 0.4_85, '''label''': '''cat'''},
{'''score''': 0.0_04, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : List[str] = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
_lowerCamelCase : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
_lowerCamelCase : Dict = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
{'''score''': 0.5_11, '''label''': '''remote'''},
{'''score''': 0.4_85, '''label''': '''cat'''},
{'''score''': 0.0_04, '''label''': '''plane'''},
] , )
_lowerCamelCase : List[str] = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[
{'''score''': 0.5_11, '''label''': '''remote'''},
{'''score''': 0.4_85, '''label''': '''cat'''},
{'''score''': 0.0_04, '''label''': '''plane'''},
],
]
* 5 , )
| 83 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE :Optional[Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE :Optional[Any] = '''▁'''
__SCREAMING_SNAKE_CASE :Any = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__SCREAMING_SNAKE_CASE :List[str] = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
}
}
__SCREAMING_SNAKE_CASE :Optional[Any] = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
__SCREAMING_SNAKE_CASE :Dict = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : Union[str, Any] = VOCAB_FILES_NAMES
_lowerCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : Any = ["""input_ids""", """attention_mask"""]
_lowerCamelCase : List[int] = []
_lowerCamelCase : List[int] = []
def __init__( self : Dict , snake_case_ : Optional[int] , snake_case_ : Dict="<s>" , snake_case_ : Dict="</s>" , snake_case_ : Optional[int]="</s>" , snake_case_ : Optional[int]="<s>" , snake_case_ : Tuple="<unk>" , snake_case_ : Union[str, Any]="<pad>" , snake_case_ : Dict="<mask>" , snake_case_ : Optional[int]=None , snake_case_ : Any=None , snake_case_ : Dict=None , snake_case_ : Optional[Dict[str, Any]] = None , snake_case_ : List[str]=None , **snake_case_ : Tuple , ):
# Mask token behave like a normal word, i.e. include the space before it
_UpperCAmelCase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , tokenizer_file=snake_case_ , src_lang=snake_case_ , tgt_lang=snake_case_ , additional_special_tokens=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , **snake_case_ , )
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case_ ) )
_UpperCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_UpperCAmelCase = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCAmelCase = 1
_UpperCAmelCase = len(self.sp_model )
_UpperCAmelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(snake_case_ )
}
_UpperCAmelCase = {v: k for k, v in self.lang_code_to_id.items()}
_UpperCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_UpperCAmelCase = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
_UpperCAmelCase = src_lang if src_lang is not None else "en_XX"
_UpperCAmelCase = self.lang_code_to_id[self._src_lang]
_UpperCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Dict ):
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
_UpperCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[Any] , snake_case_ : List[Any] ):
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def lowercase ( self : Union[str, Any] ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowercase ( self : Union[str, Any] ):
return self._src_lang
@src_lang.setter
def lowercase ( self : Optional[Any] , snake_case_ : str ):
_UpperCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowercase ( self : Dict , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None , snake_case_ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
_UpperCAmelCase = [1] * len(self.prefix_tokens )
_UpperCAmelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case_ )) + suffix_ones
return prefix_ones + ([0] * len(snake_case_ )) + ([0] * len(snake_case_ )) + suffix_ones
def lowercase ( self : Union[str, Any] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowercase ( self : str , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase ( self : List[Any] , snake_case_ : Dict , snake_case_ : str , snake_case_ : Optional[str] , snake_case_ : Optional[str] , **snake_case_ : List[Any] ):
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
_UpperCAmelCase = src_lang
_UpperCAmelCase = self(snake_case_ , add_special_tokens=snake_case_ , return_tensors=snake_case_ , **snake_case_ )
_UpperCAmelCase = self.convert_tokens_to_ids(snake_case_ )
_UpperCAmelCase = tgt_lang_id
return inputs
def lowercase ( self : Any ):
_UpperCAmelCase = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase ( self : Union[str, Any] , snake_case_ : str ):
return self.sp_model.encode(snake_case_ , out_type=snake_case_ )
def lowercase ( self : Optional[int] , snake_case_ : List[Any] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase = self.sp_model.PieceToId(snake_case_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowercase ( self : Union[str, Any] , snake_case_ : List[Any] ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowercase ( self : Optional[Any] , snake_case_ : Union[str, Any] ):
_UpperCAmelCase = "".join(snake_case_ ).replace(snake_case_ , " " ).strip()
return out_string
def lowercase ( self : List[str] , snake_case_ : str , snake_case_ : Optional[str] = None ):
if not os.path.isdir(snake_case_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCAmelCase = os.path.join(
snake_case_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case_ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case_ , "wb" ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (out_vocab_file,)
def lowercase ( self : Optional[Any] , snake_case_ : List[str] , snake_case_ : str = "en_XX" , snake_case_ : Optional[List[str]] = None , snake_case_ : str = "ro_RO" , **snake_case_ : int , ):
_UpperCAmelCase = src_lang
_UpperCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(snake_case_ , snake_case_ , **snake_case_ )
def lowercase ( self : str ):
return self.set_src_lang_special_tokens(self.src_lang )
def lowercase ( self : Optional[int] ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowercase ( self : Optional[int] , snake_case_ : Optional[Any] ):
_UpperCAmelCase = self.lang_code_to_id[src_lang]
_UpperCAmelCase = []
_UpperCAmelCase = [self.eos_token_id, self.cur_lang_code]
def lowercase ( self : int , snake_case_ : str ):
_UpperCAmelCase = self.lang_code_to_id[lang]
_UpperCAmelCase = []
_UpperCAmelCase = [self.eos_token_id, self.cur_lang_code]
| 236 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ : Any = ['model.decoder.embed_positions.weights']
def _snake_case ( UpperCAmelCase_ : Optional[int] ):
if "emb" in name:
A__ = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
A__ = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
A__ = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
A__ = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
A__ = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
A__ = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
A__ = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
A__ = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
A__ = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
A__ = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
A__ = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
def _snake_case ( UpperCAmelCase_ : OrderedDict , UpperCAmelCase_ : int ):
A__ = list(state_dict.keys() )
A__ = {}
for key in keys:
A__ = state_dict.pop(UpperCAmelCase_ )
A__ = rename_keys(UpperCAmelCase_ )
if "in_proj_weight" in key:
# split fused qkv proj
A__ = val[:hidden_size, :]
A__ = val[hidden_size : 2 * hidden_size, :]
A__ = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
A__ = val
else:
A__ = val
return state_dict, enc_dec_proj_state_dict
def _snake_case ( UpperCAmelCase_ : str ):
if checkpoint == "small":
# default config values
A__ = 1024
A__ = 24
A__ = 16
elif checkpoint == "medium":
A__ = 1536
A__ = 48
A__ = 24
elif checkpoint == "large":
A__ = 2048
A__ = 48
A__ = 32
else:
raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
A__ = MusicgenDecoderConfig(
hidden_size=UpperCAmelCase_ , ffn_dim=hidden_size * 4 , num_hidden_layers=UpperCAmelCase_ , num_attention_heads=UpperCAmelCase_ , )
return config
@torch.no_grad()
def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Any="cpu" ):
A__ = MusicGen.get_pretrained(UpperCAmelCase_ , device=UpperCAmelCase_ )
A__ = decoder_config_from_checkpoint(UpperCAmelCase_ )
A__ = fairseq_model.lm.state_dict()
A__ , A__ = rename_state_dict(
UpperCAmelCase_ , hidden_size=decoder_config.hidden_size )
A__ = TaEncoderModel.from_pretrained("""t5-base""" )
A__ = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
A__ = MusicgenForCausalLM(UpperCAmelCase_ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
A__ , A__ = decoder.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(UpperCAmelCase_ )
if len(UpperCAmelCase_ ) > 0:
raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" )
if len(UpperCAmelCase_ ) > 0:
raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
A__ = MusicgenForConditionalGeneration(text_encoder=UpperCAmelCase_ , audio_encoder=UpperCAmelCase_ , decoder=UpperCAmelCase_ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(UpperCAmelCase_ )
# check we can do a forward pass
A__ = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
A__ = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
A__ = model(input_ids=UpperCAmelCase_ , decoder_input_ids=UpperCAmelCase_ ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
A__ = AutoTokenizer.from_pretrained("""t5-base""" )
A__ = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
A__ = MusicgenProcessor(feature_extractor=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ )
# set the appropriate bos/pad token ids
A__ = 2048
A__ = 2048
# set other default generation config params
A__ = int(30 * audio_encoder.config.frame_rate )
A__ = True
A__ = 3.0
if pytorch_dump_folder is not None:
Path(UpperCAmelCase_ ).mkdir(exist_ok=UpperCAmelCase_ )
logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(UpperCAmelCase_ )
processor.save_pretrained(UpperCAmelCase_ )
if repo_id:
logger.info(F"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(UpperCAmelCase_ )
processor.push_to_hub(UpperCAmelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
SCREAMING_SNAKE_CASE_ : Tuple = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 500 |
"""simple docstring"""
import baseaa
def _snake_case ( UpperCAmelCase_ : str ):
return baseaa.aaaencode(string.encode("""utf-8""" ) )
def _snake_case ( UpperCAmelCase_ : bytes ):
return baseaa.aaadecode(UpperCAmelCase_ ).decode("""utf-8""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 500 | 1 |
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: list ):
SCREAMING_SNAKE_CASE__ = len(UpperCamelCase__ )
for i in range(1 , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ = collection[i]
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = i - 1
while low <= high:
SCREAMING_SNAKE_CASE__ = (low + high) // 2
if val < collection[mid]:
SCREAMING_SNAKE_CASE__ = mid - 1
else:
SCREAMING_SNAKE_CASE__ = mid + 1
for j in range(UpperCamelCase__ , UpperCamelCase__ , -1 ):
SCREAMING_SNAKE_CASE__ = collection[j - 1]
SCREAMING_SNAKE_CASE__ = val
return collection
if __name__ == "__main__":
_lowerCamelCase = input('Enter numbers separated by a comma:\n').strip()
_lowerCamelCase = [int(item) for item in user_input.split(',')]
print(binary_insertion_sort(unsorted)) | 6 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase__ = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 110 | 0 |
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("""0.8.3"""):
raise Exception("""requires gluonnlp == 0.8.3""")
if version.parse(mx.__version__) != version.parse("""1.5.0"""):
raise Exception("""requires mxnet == 1.5.0""")
logging.set_verbosity_info()
__lowerCAmelCase : int =logging.get_logger(__name__)
__lowerCAmelCase : Tuple ="""The Nymphenburg Palace is a beautiful palace in Munich!"""
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :str ) -> int:
'''simple docstring'''
lowercase = {
"""attention_cell""": """multi_head""",
"""num_layers""": 4,
"""units""": 1_0_2_4,
"""hidden_size""": 7_6_8,
"""max_length""": 5_1_2,
"""num_heads""": 8,
"""scaled""": True,
"""dropout""": 0.1,
"""use_residual""": True,
"""embed_size""": 1_0_2_4,
"""embed_dropout""": 0.1,
"""word_embed""": None,
"""layer_norm_eps""": 1e-5,
"""token_type_vocab_size""": 2,
}
lowercase = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
lowercase = BERTEncoder(
attention_cell=predefined_args["""attention_cell"""] , num_layers=predefined_args["""num_layers"""] , units=predefined_args["""units"""] , hidden_size=predefined_args["""hidden_size"""] , max_length=predefined_args["""max_length"""] , num_heads=predefined_args["""num_heads"""] , scaled=predefined_args["""scaled"""] , dropout=predefined_args["""dropout"""] , output_attention=lowerCAmelCase__ , output_all_encodings=lowerCAmelCase__ , use_residual=predefined_args["""use_residual"""] , activation=predefined_args.get("""activation""" , """gelu""" ) , layer_norm_eps=predefined_args.get("""layer_norm_eps""" , lowerCAmelCase__ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
lowercase = """openwebtext_ccnews_stories_books_cased"""
# Specify download folder to Gluonnlp's vocab
lowercase = os.path.join(get_home_dir() , """models""" )
lowercase = _load_vocab(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , cls=lowerCAmelCase__ )
lowercase = nlp.model.BERTModel(
lowerCAmelCase__ , len(lowerCAmelCase__ ) , units=predefined_args["""units"""] , embed_size=predefined_args["""embed_size"""] , embed_dropout=predefined_args["""embed_dropout"""] , word_embed=predefined_args["""word_embed"""] , use_pooler=lowerCAmelCase__ , use_token_type_embed=lowerCAmelCase__ , token_type_vocab_size=predefined_args["""token_type_vocab_size"""] , use_classifier=lowerCAmelCase__ , use_decoder=lowerCAmelCase__ , )
original_bort.load_parameters(lowerCAmelCase__ , cast_dtype=lowerCAmelCase__ , ignore_extra=lowerCAmelCase__ )
lowercase = original_bort._collect_params_with_prefix()
# Build our config 🤗
lowercase = {
"""architectures""": ["""BertForMaskedLM"""],
"""attention_probs_dropout_prob""": predefined_args["""dropout"""],
"""hidden_act""": """gelu""",
"""hidden_dropout_prob""": predefined_args["""dropout"""],
"""hidden_size""": predefined_args["""embed_size"""],
"""initializer_range""": 0.02,
"""intermediate_size""": predefined_args["""hidden_size"""],
"""layer_norm_eps""": predefined_args["""layer_norm_eps"""],
"""max_position_embeddings""": predefined_args["""max_length"""],
"""model_type""": """bort""",
"""num_attention_heads""": predefined_args["""num_heads"""],
"""num_hidden_layers""": predefined_args["""num_layers"""],
"""pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa
"""type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa
"""vocab_size""": len(lowerCAmelCase__ ),
}
lowercase = BertConfig.from_dict(lowerCAmelCase__ )
lowercase = BertForMaskedLM(lowerCAmelCase__ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(lowerCAmelCase__ :Optional[Any] ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[Any] ):
lowercase = hf_param.shape
lowercase = to_torch(params[gluon_param] )
lowercase = gluon_param.shape
assert (
shape_hf == shape_gluon
), f'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'
return gluon_param
lowercase = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , """word_embed.0.weight""" )
lowercase = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , """encoder.position_weight""" )
lowercase = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , """encoder.layer_norm.beta""" )
lowercase = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , """encoder.layer_norm.gamma""" )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
lowercase = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
lowercase = hf_bort_model.bert.encoder.layer[i]
# self attention
lowercase = layer.attention.self
lowercase = check_and_map_params(
self_attn.key.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' )
lowercase = check_and_map_params(
self_attn.key.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' )
lowercase = check_and_map_params(
self_attn.query.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' )
lowercase = check_and_map_params(
self_attn.query.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' )
lowercase = check_and_map_params(
self_attn.value.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' )
lowercase = check_and_map_params(
self_attn.value.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' )
# self attention output
lowercase = layer.attention.output
lowercase = check_and_map_params(
self_output.dense.bias , f'encoder.transformer_cells.{i}.proj.bias' )
lowercase = check_and_map_params(
self_output.dense.weight , f'encoder.transformer_cells.{i}.proj.weight' )
lowercase = check_and_map_params(
self_output.LayerNorm.bias , f'encoder.transformer_cells.{i}.layer_norm.beta' )
lowercase = check_and_map_params(
self_output.LayerNorm.weight , f'encoder.transformer_cells.{i}.layer_norm.gamma' )
# intermediate
lowercase = layer.intermediate
lowercase = check_and_map_params(
intermediate.dense.bias , f'encoder.transformer_cells.{i}.ffn.ffn_1.bias' )
lowercase = check_and_map_params(
intermediate.dense.weight , f'encoder.transformer_cells.{i}.ffn.ffn_1.weight' )
# output
lowercase = layer.output
lowercase = check_and_map_params(
bert_output.dense.bias , f'encoder.transformer_cells.{i}.ffn.ffn_2.bias' )
lowercase = check_and_map_params(
bert_output.dense.weight , f'encoder.transformer_cells.{i}.ffn.ffn_2.weight' )
lowercase = check_and_map_params(
bert_output.LayerNorm.bias , f'encoder.transformer_cells.{i}.ffn.layer_norm.beta' )
lowercase = check_and_map_params(
bert_output.LayerNorm.weight , f'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
lowercase = RobertaTokenizer.from_pretrained("""roberta-base""" )
lowercase = tokenizer.encode_plus(lowerCAmelCase__ )["""input_ids"""]
# Get gluon output
lowercase = mx.nd.array([input_ids] )
lowercase = original_bort(inputs=lowerCAmelCase__ , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(lowerCAmelCase__ )
lowercase = BertModel.from_pretrained(lowerCAmelCase__ )
hf_bort_model.eval()
lowercase = tokenizer.encode_plus(lowerCAmelCase__ , return_tensors="""pt""" )
lowercase = hf_bort_model(**lowerCAmelCase__ )[0]
lowercase = output_gluon[0].asnumpy()
lowercase = output_hf[0].detach().numpy()
lowercase = np.max(np.abs(hf_layer - gluon_layer ) ).item()
lowercase = np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 )
if success:
print("""✔️ Both model do output the same tensors""" )
else:
print("""❌ Both model do **NOT** output the same tensors""" )
print("""Absolute difference is:""" , lowerCAmelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__lowerCAmelCase : Optional[int] =parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 197 | """simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
__lowerCAmelCase : Optional[Any] =(
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
__lowerCAmelCase : Union[str, Any] =(
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
__lowerCAmelCase : Optional[int] =(
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
__lowerCAmelCase : Union[str, Any] =(
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
__lowerCAmelCase : Optional[int] =(
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 1_4]),
("""2H 5D 3C AS 5S""", False, [1_4, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [1_4, 1_3, 1_2, 1_1, 1_0]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
__lowerCAmelCase : int =(
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
__lowerCAmelCase : List[str] =(
("""JH AH TH KH QH""", 2_3),
("""JH 9H TH KH QH""", 2_2),
("""JC KH JS JD JH""", 2_1),
("""KH KC 3S 3H 3D""", 2_0),
("""8C 9C 5C 3C TC""", 1_9),
("""JS QS 9H TS KH""", 1_8),
("""7C 7S KH 2H 7H""", 1_7),
("""3C KH 5D 5S KH""", 1_6),
("""QH 8H KD JH 8S""", 1_5),
("""2D 6D 9D TH 7D""", 1_4),
)
def UpperCAmelCase__ ( ) -> Optional[int]:
'''simple docstring'''
lowercase , lowercase = randrange(len(lowerCAmelCase__ ) ), randrange(len(lowerCAmelCase__ ) )
lowercase = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)]
lowercase , lowercase = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def UpperCAmelCase__ ( lowerCAmelCase__ :int = 1_0_0 ) -> Optional[Any]:
'''simple docstring'''
return (generate_random_hand() for _ in range(lowerCAmelCase__ ))
@pytest.mark.parametrize("""hand, expected""" , lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] ) -> Tuple:
'''simple docstring'''
assert PokerHand(lowerCAmelCase__ )._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""" , lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple ) -> int:
'''simple docstring'''
assert PokerHand(lowerCAmelCase__ )._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""" , lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str] ) -> Optional[int]:
'''simple docstring'''
lowercase = PokerHand(lowerCAmelCase__ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""" , lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Any] ) -> str:
'''simple docstring'''
assert PokerHand(lowerCAmelCase__ )._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""" , lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Any ) -> List[str]:
'''simple docstring'''
assert PokerHand(lowerCAmelCase__ )._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""" , lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple ) -> Any:
'''simple docstring'''
assert PokerHand(lowerCAmelCase__ ).compare_with(PokerHand(lowerCAmelCase__ ) ) == expected
@pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() )
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
assert PokerHand(lowerCAmelCase__ ).compare_with(PokerHand(lowerCAmelCase__ ) ) == expected
def UpperCAmelCase__ ( ) -> Optional[Any]:
'''simple docstring'''
lowercase = [PokerHand(lowerCAmelCase__ ) for hand in SORTED_HANDS]
lowercase = poker_hands.copy()
shuffle(lowerCAmelCase__ )
lowercase = chain(sorted(lowerCAmelCase__ ) )
for index, hand in enumerate(lowerCAmelCase__ ):
assert hand == poker_hands[index]
def UpperCAmelCase__ ( ) -> Optional[Any]:
'''simple docstring'''
lowercase = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )]
pokerhands.sort(reverse=lowerCAmelCase__ )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def UpperCAmelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
lowercase = PokerHand("""2C 4S AS 3D 5C""" )
lowercase = True
lowercase = [5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def UpperCAmelCase__ ( ) -> Any:
'''simple docstring'''
lowercase = 0
lowercase = os.path.abspath(os.path.dirname(lowerCAmelCase__ ) )
lowercase = os.path.join(lowerCAmelCase__ , """poker_hands.txt""" )
with open(lowerCAmelCase__ ) as file_hand:
for line in file_hand:
lowercase = line[:1_4].strip()
lowercase = line[1_5:].strip()
lowercase , lowercase = PokerHand(lowerCAmelCase__ ), PokerHand(lowerCAmelCase__ )
lowercase = player.compare_with(lowerCAmelCase__ )
if output == "Win":
answer += 1
assert answer == 3_7_6
| 197 | 1 |
from manim import *
class __UpperCamelCase ( _lowercase ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> int:
a__ = Rectangle(height=0.5 , width=0.5 )
a__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
a__ = [mem.copy() for i in range(6 )]
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*SCREAMING_SNAKE_CASE ).arrange(SCREAMING_SNAKE_CASE , buff=0 )
a__ = VGroup(*SCREAMING_SNAKE_CASE ).arrange(SCREAMING_SNAKE_CASE , buff=0 )
a__ = VGroup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).arrange(SCREAMING_SNAKE_CASE , buff=0 )
a__ = Text('''CPU''' , font_size=2_4 )
a__ = Group(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).arrange(SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE )
cpu.move_to([-2.5, -0.5, 0] )
self.add(SCREAMING_SNAKE_CASE )
a__ = [mem.copy() for i in range(1 )]
a__ = VGroup(*SCREAMING_SNAKE_CASE ).arrange(SCREAMING_SNAKE_CASE , buff=0 )
a__ = Text('''GPU''' , font_size=2_4 )
a__ = Group(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).arrange(SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE )
gpu.align_to(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
gpu.set_x(gpu.get_x() - 1 )
self.add(SCREAMING_SNAKE_CASE )
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*SCREAMING_SNAKE_CASE ).arrange(SCREAMING_SNAKE_CASE , buff=0 )
a__ = Text('''Model''' , font_size=2_4 )
a__ = Group(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).arrange(SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE )
model.move_to([3, -1.0, 0] )
self.play(
Create(SCREAMING_SNAKE_CASE , run_time=1 ) , Create(SCREAMING_SNAKE_CASE , run_time=1 ) , Create(SCREAMING_SNAKE_CASE , run_time=1 ) , )
a__ = MarkupText(
f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM." , font_size=2_4 , )
a__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
a__ = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE , run_time=2.5 ) , Write(SCREAMING_SNAKE_CASE ) , Write(SCREAMING_SNAKE_CASE ) )
self.add(SCREAMING_SNAKE_CASE )
a__ = []
a__ = []
a__ = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE ):
a__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(SCREAMING_SNAKE_CASE , opacity=0.7 )
cpu_target.move_to(SCREAMING_SNAKE_CASE )
cpu_target.generate_target()
a__ = 0.46 / 4
a__ = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=SCREAMING_SNAKE_CASE )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=SCREAMING_SNAKE_CASE , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=SCREAMING_SNAKE_CASE , buff=0.0 )
cpu_targs.append(SCREAMING_SNAKE_CASE )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(SCREAMING_SNAKE_CASE ) )
second_animations.append(MoveToTarget(SCREAMING_SNAKE_CASE , run_time=1.5 ) )
self.play(*SCREAMING_SNAKE_CASE )
self.play(*SCREAMING_SNAKE_CASE )
self.wait()
| 194 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=1_3 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=9_9 , SCREAMING_SNAKE_CASE=1_6 , SCREAMING_SNAKE_CASE=3_6 , SCREAMING_SNAKE_CASE=6 , SCREAMING_SNAKE_CASE=6 , SCREAMING_SNAKE_CASE=6 , SCREAMING_SNAKE_CASE=3_7 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=5_1_2 , SCREAMING_SNAKE_CASE=1_6 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=None , ) -> Union[str, Any]:
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_input_mask
a__ = use_token_type_ids
a__ = use_labels
a__ = vocab_size
a__ = embedding_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_hidden_groups
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = type_sequence_label_size
a__ = initializer_range
a__ = num_labels
a__ = num_choices
a__ = scope
def _UpperCAmelCase ( self ) -> List[str]:
a__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a__ = None
if self.use_input_mask:
a__ = random_attention_mask([self.batch_size, self.seq_length] )
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a__ = None
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a__ = ids_tensor([self.batch_size] , self.num_choices )
a__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ) -> Any:
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
a__ = AlbertModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE )
a__ = model(SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE )
a__ = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
a__ = AlbertForPreTraining(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , sentence_order_label=SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
a__ = AlbertForMaskedLM(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
a__ = AlbertForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , start_positions=SCREAMING_SNAKE_CASE , end_positions=SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
a__ = self.num_labels
a__ = AlbertForSequenceClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
a__ = self.num_labels
a__ = AlbertForTokenClassification(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
a__ = self.num_choices
a__ = AlbertForMultipleChoice(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
a__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a__ = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self ) -> Tuple:
a__ = self.prepare_config_and_inputs()
(
(
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) ,
) = config_and_inputs
a__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
_lowercase : List[str] = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
_lowercase : List[str] = (
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowercase : List[str] = True
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> List[str]:
a__ = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE ):
a__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE )
a__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE )
return inputs_dict
def _UpperCAmelCase ( self ) -> Dict:
a__ = AlbertModelTester(self )
a__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Union[str, Any]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self ) -> Any:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self ) -> List[Any]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self ) -> Optional[Any]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self ) -> List[Any]:
a__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a__ = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
@slow
def _UpperCAmelCase ( self ) -> List[str]:
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = AlbertModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _UpperCAmelCase ( self ) -> Optional[Any]:
a__ = AlbertModel.from_pretrained('''albert-base-v2''' )
a__ = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
a__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
a__ = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )[0]
a__ = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
a__ = torch.tensor(
[[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 194 | 1 |
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : Union[str, Any] = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''' )
if tokenizer_name is None:
snake_case_ : Union[str, Any] = TOKENIZER_CLASSES
else:
snake_case_ : Optional[Any] = {tokenizer_name: getattr(__UpperCAmelCase ,tokenizer_name + "Fast" )}
logger.info(F'''Loading tokenizer classes: {tokenizer_names}''' )
for tokenizer_name in tokenizer_names:
snake_case_ : Union[str, Any] = TOKENIZER_CLASSES[tokenizer_name]
snake_case_ : Optional[Any] = True
if checkpoint_name is None:
snake_case_ : Any = list(tokenizer_class.max_model_input_sizes.keys() )
else:
snake_case_ : int = [checkpoint_name]
logger.info(F'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''' )
for checkpoint in checkpoint_names:
logger.info(F'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''' )
# Load tokenizer
snake_case_ : List[Any] = tokenizer_class.from_pretrained(__UpperCAmelCase ,force_download=__UpperCAmelCase )
# Save fast tokenizer
logger.info(F'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''' )
# For organization names we create sub-directories
if "/" in checkpoint:
snake_case_, snake_case_ : Optional[Any] = checkpoint.split("/" )
snake_case_ : Union[str, Any] = os.path.join(__UpperCAmelCase ,__UpperCAmelCase )
elif add_prefix:
snake_case_ : Optional[int] = checkpoint
snake_case_ : Dict = dump_path
else:
snake_case_ : Dict = None
snake_case_ : int = dump_path
logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
snake_case_ : Any = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
snake_case_ : List[Any] = file_path.split(__UpperCAmelCase )[-1][0]
if next_char == "/":
snake_case_ : Optional[Any] = os.path.join(__UpperCAmelCase ,__UpperCAmelCase )
snake_case_ : List[Any] = None
logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
snake_case_ : str = tokenizer.save_pretrained(
__UpperCAmelCase ,legacy_format=__UpperCAmelCase ,filename_prefix=__UpperCAmelCase )
logger.info(F'''=> File names {file_names}''' )
for file_name in file_names:
if not file_name.endswith("tokenizer.json" ):
os.remove(__UpperCAmelCase )
logger.info(F'''=> removing {file_name}''' )
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
f'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
__lowerCamelCase : List[Any] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 713 |
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
__lowerCamelCase : str = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
__lowerCamelCase : Tuple = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> List[str]:
"""simple docstring"""
snake_case_ : Tuple = SavedModel()
snake_case_ : Dict = []
with open(os.path.join(__magic_name__ ,"utils" ,"tf_ops" ,"onnx.json" ) ) as f:
snake_case_ : Dict = json.load(__magic_name__ )["opsets"]
for i in range(1 ,opset + 1 ):
onnx_ops.extend(onnx_opsets[str(__magic_name__ )] )
with open(__magic_name__ ,"rb" ) as f:
saved_model.ParseFromString(f.read() )
snake_case_ : Tuple = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
snake_case_ : str = sorted(__magic_name__ )
snake_case_ : Optional[int] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(__magic_name__ )
if strict and len(__magic_name__ ) > 0:
raise Exception(F'''Found the following incompatible ops for the opset {opset}:\n''' + incompatible_ops )
elif len(__magic_name__ ) > 0:
print(F'''Found the following incompatible ops for the opset {opset}:''' )
print(*__magic_name__ ,sep="\n" )
else:
print(F'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
__lowerCamelCase : Dict = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 656 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class __A (unittest.TestCase ):
def _snake_case ( self ):
__UpperCAmelCase : Any = "ZinengTang/tvlt-base"
__UpperCAmelCase : str = tempfile.mkdtemp()
def _snake_case ( self , **UpperCamelCase_ ):
return TvltImageProcessor.from_pretrained(self.checkpoint , **lowerCamelCase__ )
def _snake_case ( self , **UpperCamelCase_ ):
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **lowerCamelCase__ )
def _snake_case ( self ):
shutil.rmtree(self.tmpdirname )
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = self.get_image_processor()
__UpperCAmelCase : List[Any] = self.get_feature_extractor()
__UpperCAmelCase : List[str] = TvltProcessor(image_processor=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
processor.save_pretrained(self.tmpdirname )
__UpperCAmelCase : Optional[Any] = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , lowerCamelCase__ )
self.assertIsInstance(processor.image_processor , lowerCamelCase__ )
def _snake_case ( self ):
__UpperCAmelCase : List[Any] = self.get_image_processor()
__UpperCAmelCase : Optional[int] = self.get_feature_extractor()
__UpperCAmelCase : Union[str, Any] = TvltProcessor(image_processor=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
__UpperCAmelCase : Union[str, Any] = np.ones([1_20_00] )
__UpperCAmelCase : Optional[int] = feature_extractor(lowerCamelCase__ , return_tensors="np" )
__UpperCAmelCase : Union[str, Any] = processor(audio=lowerCamelCase__ , return_tensors="np" )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _snake_case ( self ):
__UpperCAmelCase : int = self.get_image_processor()
__UpperCAmelCase : Optional[Any] = self.get_feature_extractor()
__UpperCAmelCase : Optional[Any] = TvltProcessor(image_processor=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
__UpperCAmelCase : List[str] = np.ones([3, 2_24, 2_24] )
__UpperCAmelCase : str = image_processor(lowerCamelCase__ , return_tensors="np" )
__UpperCAmelCase : str = processor(images=lowerCamelCase__ , return_tensors="np" )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = self.get_image_processor()
__UpperCAmelCase : Tuple = self.get_feature_extractor()
__UpperCAmelCase : str = TvltProcessor(image_processor=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
__UpperCAmelCase : str = np.ones([1_20_00] )
__UpperCAmelCase : str = np.ones([3, 2_24, 2_24] )
__UpperCAmelCase : Optional[Any] = processor(audio=lowerCamelCase__ , images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def _snake_case ( self ):
__UpperCAmelCase : Any = self.get_image_processor()
__UpperCAmelCase : Tuple = self.get_feature_extractor()
__UpperCAmelCase : Union[str, Any] = TvltProcessor(image_processor=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="`processor` and `image_processor`+`feature_extractor` model input names do not match" , )
| 168 |
'''simple docstring'''
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def _A ( lowercase__ = "isbn/0140328726" ):
lowercase__ = olid.strip().strip("""/""" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("""/""" ) != 1:
lowercase__ = f'''{olid} is not a valid Open Library olid'''
raise ValueError(lowercase__ )
return requests.get(f'''https://openlibrary.org/{new_olid}.json''' ).json()
def _A ( lowercase__ ):
lowercase__ = {
"""title""": """Title""",
"""publish_date""": """Publish date""",
"""authors""": """Authors""",
"""number_of_pages""": """Number of pages:""",
"""first_sentence""": """First sentence""",
"""isbn_10""": """ISBN (10)""",
"""isbn_13""": """ISBN (13)""",
}
lowercase__ = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
lowercase__ = [
get_openlibrary_data(author["""key"""] )["""name"""] for author in data["""Authors"""]
]
lowercase__ = data["""First sentence"""]["""value"""]
for key, value in data.items():
if isinstance(lowercase__ , lowercase__ ):
lowercase__ = """, """.join(lowercase__ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
__A = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(F'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
__A = summarize_book(get_openlibrary_data(F'''isbn/{isbn}'''))
print("\n".join(F'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F'''Sorry, there are no results for ISBN: {isbn}.''')
| 325 | 0 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
UpperCamelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : str = "linear"
_UpperCamelCase : Tuple = "cosine"
_UpperCamelCase : List[Any] = "cosine_with_restarts"
_UpperCamelCase : Dict = "polynomial"
_UpperCamelCase : Optional[Any] = "constant"
_UpperCamelCase : Optional[int] = "constant_with_warmup"
_UpperCamelCase : List[str] = "piecewise_constant"
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> str:
return LambdaLR(SCREAMING_SNAKE_CASE , lambda SCREAMING_SNAKE_CASE : 1 , last_epoch=SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> List[str]:
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1.0 , SCREAMING_SNAKE_CASE ) )
return 1.0
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> List[Any]:
_lowercase : Union[str, Any] = {}
_lowercase : Union[str, Any] = step_rules.split(',' )
for rule_str in rule_list[:-1]:
_lowercase , _lowercase : Any = rule_str.split(':' )
_lowercase : Any = int(SCREAMING_SNAKE_CASE )
_lowercase : Optional[int] = float(SCREAMING_SNAKE_CASE )
_lowercase : Any = value
_lowercase : Optional[Any] = float(rule_list[-1] )
def create_rules_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
def rule_func(SCREAMING_SNAKE_CASE ) -> float:
_lowercase : Any = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(SCREAMING_SNAKE_CASE ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
_lowercase : Any = create_rules_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=-1 ) -> Optional[int]:
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0.5 , SCREAMING_SNAKE_CASE = -1 ) -> int:
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
_lowercase : Tuple = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(SCREAMING_SNAKE_CASE ) * 2.0 * progress )) )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = -1 ) -> int:
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
_lowercase : Optional[int] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(SCREAMING_SNAKE_CASE ) * progress) % 1.0) )) )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=1E-7 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=-1 ) -> Dict:
_lowercase : List[Any] = optimizer.defaults['lr']
if not (lr_init > lr_end):
raise ValueError(F"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
_lowercase : Optional[int] = lr_init - lr_end
_lowercase : List[str] = num_training_steps - num_warmup_steps
_lowercase : Any = 1 - (current_step - num_warmup_steps) / decay_steps
_lowercase : Tuple = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 1.0 , SCREAMING_SNAKE_CASE = -1 , ) -> Any:
_lowercase : Union[str, Any] = SchedulerType(SCREAMING_SNAKE_CASE )
_lowercase : str = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(SCREAMING_SNAKE_CASE , step_rules=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , num_cycles=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , power=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE , )
return schedule_func(
SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
| 677 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , _lowerCAmelCase=1_0_0_0 , ):
_lowercase : List[str] = parent
_lowercase : Optional[Any] = batch_size
_lowercase : str = seq_length
_lowercase : Dict = is_training
_lowercase : Optional[int] = use_input_mask
_lowercase : List[Any] = use_token_type_ids
_lowercase : Union[str, Any] = use_labels
_lowercase : Optional[Any] = vocab_size
_lowercase : Optional[Any] = hidden_size
_lowercase : str = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[Any] = hidden_act
_lowercase : Union[str, Any] = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : int = max_position_embeddings
_lowercase : str = type_vocab_size
_lowercase : Tuple = type_sequence_label_size
_lowercase : Dict = initializer_range
_lowercase : List[Any] = num_labels
_lowercase : List[str] = num_choices
_lowercase : Dict = scope
_lowercase : List[Any] = range_bbox
def __a ( self ):
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowercase : List[str] = bbox[i, j, 3]
_lowercase : Optional[int] = bbox[i, j, 1]
_lowercase : int = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowercase : Dict = bbox[i, j, 2]
_lowercase : Dict = bbox[i, j, 0]
_lowercase : int = t
_lowercase : Union[str, Any] = tf.convert_to_tensor(_lowerCAmelCase )
_lowercase : Any = None
if self.use_input_mask:
_lowercase : int = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : Tuple = None
if self.use_token_type_ids:
_lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : Tuple = None
_lowercase : Union[str, Any] = None
_lowercase : List[str] = None
if self.use_labels:
_lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase : str = ids_tensor([self.batch_size] , self.num_choices )
_lowercase : Any = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = TFLayoutLMModel(config=_lowerCAmelCase )
_lowercase : List[Any] = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
_lowercase : List[Any] = model(_lowerCAmelCase , _lowerCAmelCase , token_type_ids=_lowerCAmelCase )
_lowercase : List[str] = model(_lowerCAmelCase , _lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = TFLayoutLMForMaskedLM(config=_lowerCAmelCase )
_lowercase : Any = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : str = self.num_labels
_lowercase : Tuple = TFLayoutLMForSequenceClassification(config=_lowerCAmelCase )
_lowercase : int = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Any = self.num_labels
_lowercase : Optional[int] = TFLayoutLMForTokenClassification(config=_lowerCAmelCase )
_lowercase : Union[str, Any] = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Union[str, Any] = TFLayoutLMForQuestionAnswering(config=_lowerCAmelCase )
_lowercase : str = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self ):
_lowercase : Union[str, Any] = self.prepare_config_and_inputs()
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) : List[Any] = config_and_inputs
_lowercase : Optional[Any] = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : Optional[int] = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
_UpperCamelCase : Union[str, Any] = (
{
"feature-extraction": TFLayoutLMModel,
"fill-mask": TFLayoutLMForMaskedLM,
"text-classification": TFLayoutLMForSequenceClassification,
"token-classification": TFLayoutLMForTokenClassification,
"zero-shot": TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
_UpperCamelCase : str = False
_UpperCamelCase : List[str] = True
_UpperCamelCase : Tuple = 10
def __a ( self ):
_lowercase : Optional[int] = TFLayoutLMModelTester(self )
_lowercase : str = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=3_7 )
def __a ( self ):
self.config_tester.run_common_tests()
def __a ( self ):
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase )
def __a ( self ):
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase )
@slow
def __a ( self ):
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : List[Any] = TFLayoutLMModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip('Onnx compliancy broke with TF 2.10' )
def __a ( self ):
pass
def __magic_name__ ( ) -> Optional[int]:
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
_lowercase : Optional[Any] = tf.convert_to_tensor([[101,1_019,1_014,1_016,1_037,12_849,4_747,1_004,14_246,2_278,5_439,4_524,5_002,2_930,2_193,2_930,4_341,3_208,1_005,1_055,2_171,2_848,11_300,3_531,102],[101,4_070,4_034,7_020,1_024,3_058,1_015,1_013,2_861,1_013,6_070,19_274,2_772,6_205,27_814,16_147,16_147,4_343,2_047,10_283,10_969,14_389,1_012,2_338,102]] ) # noqa: E231
_lowercase : Tuple = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
_lowercase : Optional[int] = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1_000,1_000,1_000,1_000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1_000,1_000,1_000,1_000]]] ) # noqa: E231
_lowercase : int = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
_lowercase : Union[str, Any] = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __a ( self ):
_lowercase : Tuple = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Optional[int] = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : Tuple = model(input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
# test the sequence output on [0, :3, :3]
_lowercase : Optional[Any] = tf.convert_to_tensor(
[[0.17_85, -0.19_47, -0.04_25], [-0.32_54, -0.28_07, 0.25_53], [-0.53_91, -0.33_22, 0.33_64]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=1E-3 ) )
# test the pooled output on [1, :3]
_lowercase : Optional[int] = tf.convert_to_tensor([-0.65_80, -0.02_14, 0.85_52] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _lowerCAmelCase , atol=1E-3 ) )
@slow
def __a ( self ):
# initialize model with randomly initialized sequence classification head
_lowercase : Optional[Any] = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Optional[Any] = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : Any = model(
input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
_lowercase : List[Any] = outputs.loss
_lowercase : Any = (2,)
self.assertEqual(loss.shape , _lowerCAmelCase )
# test the shape of the logits
_lowercase : str = outputs.logits
_lowercase : Dict = (2, 2)
self.assertEqual(logits.shape , _lowerCAmelCase )
@slow
def __a ( self ):
# initialize model with randomly initialized token classification head
_lowercase : Dict = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=1_3 )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : str = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : Dict = model(
input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
# test the shape of the logits
_lowercase : Dict = outputs.logits
_lowercase : Optional[Any] = tf.convert_to_tensor((2, 2_5, 1_3) )
self.assertEqual(logits.shape , _lowerCAmelCase )
@slow
def __a ( self ):
# initialize model with randomly initialized token classification head
_lowercase : Union[str, Any] = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : List[Any] = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : int = model(input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
# test the shape of the logits
_lowercase : Any = tf.convert_to_tensor((2, 2_5) )
self.assertEqual(outputs.start_logits.shape , _lowerCAmelCase )
self.assertEqual(outputs.end_logits.shape , _lowerCAmelCase )
| 677 | 1 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
UpperCamelCase__ : int = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def lowerCAmelCase_ ( _lowerCamelCase: Dict , _lowerCamelCase: List[Any] ):
inspect_dataset(_lowerCamelCase , _lowerCamelCase )
__SCREAMING_SNAKE_CASE : Tuple = path + """.py"""
assert script_name in os.listdir(_lowerCamelCase )
assert "__pycache__" not in os.listdir(_lowerCamelCase )
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def lowerCAmelCase_ ( _lowerCamelCase: int , _lowerCamelCase: int ):
inspect_metric(_lowerCamelCase , _lowerCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = path + """.py"""
assert script_name in os.listdir(_lowerCamelCase )
assert "__pycache__" not in os.listdir(_lowerCamelCase )
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowerCAmelCase_ ( _lowerCamelCase: Union[str, Any] , _lowerCamelCase: List[str] , _lowerCamelCase: Any ):
__SCREAMING_SNAKE_CASE : Tuple = get_dataset_config_info(_lowerCamelCase , config_name=_lowerCamelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowerCAmelCase_ ( _lowerCamelCase: Tuple , _lowerCamelCase: List[Any] , _lowerCamelCase: Optional[int] ):
with pytest.raises(_lowerCamelCase ):
get_dataset_config_info(_lowerCamelCase , config_name=_lowerCamelCase )
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def lowerCAmelCase_ ( _lowerCamelCase: str , _lowerCamelCase: Tuple ):
__SCREAMING_SNAKE_CASE : Optional[Any] = get_dataset_config_names(_lowerCamelCase )
assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def lowerCAmelCase_ ( _lowerCamelCase: int , _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Union[str, Any] ):
__SCREAMING_SNAKE_CASE : str = get_dataset_infos(_lowerCamelCase )
assert list(infos.keys() ) == expected_configs
__SCREAMING_SNAKE_CASE : List[Any] = expected_configs[0]
assert expected_config in infos
__SCREAMING_SNAKE_CASE : List[str] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowerCAmelCase_ ( _lowerCamelCase: int , _lowerCamelCase: List[str] , _lowerCamelCase: Dict ):
__SCREAMING_SNAKE_CASE : Tuple = get_dataset_infos(_lowerCamelCase )
assert expected_config in infos
__SCREAMING_SNAKE_CASE : Any = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowerCAmelCase_ ( _lowerCamelCase: int , _lowerCamelCase: List[Any] , _lowerCamelCase: Dict ):
with pytest.raises(_lowerCamelCase ):
get_dataset_split_names(_lowerCamelCase , config_name=_lowerCamelCase ) | 578 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
_A : Tuple = ''''''
_A : Dict = '''hf-legacy''' # "hf://"" is reserved for hffs
def __init__( self : List[Any] , lowerCAmelCase__ : Optional[DatasetInfo] = None , lowerCAmelCase__ : Optional[str] = None , **lowerCAmelCase__ : str , ):
"""simple docstring"""
super().__init__(self , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = repo_info
__SCREAMING_SNAKE_CASE : Dict = token
__SCREAMING_SNAKE_CASE : Dict = None
def UpperCamelCase__ ( self : Optional[int] ):
"""simple docstring"""
if self.dir_cache is None:
__SCREAMING_SNAKE_CASE : Optional[int] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__SCREAMING_SNAKE_CASE : str = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(lowerCAmelCase__ ): {"""name""": str(lowerCAmelCase__ ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def UpperCamelCase__ ( self : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : str = "rb" , **lowerCAmelCase__ : Optional[Any] , ):
"""simple docstring"""
if not isinstance(self.repo_info , lowerCAmelCase__ ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
__SCREAMING_SNAKE_CASE : Tuple = hf_hub_url(self.repo_info.id , lowerCAmelCase__ , revision=self.repo_info.sha )
return fsspec.open(
lowerCAmelCase__ , mode=lowerCAmelCase__ , headers=get_authentication_headers_for_url(lowerCAmelCase__ , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def UpperCamelCase__ ( self : Optional[int] , lowerCAmelCase__ : Union[str, Any] , **lowerCAmelCase__ : Any ):
"""simple docstring"""
self._get_dirs()
__SCREAMING_SNAKE_CASE : Dict = self._strip_protocol(lowerCAmelCase__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(lowerCAmelCase__ )
def UpperCamelCase__ ( self : int , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any]=False , **lowerCAmelCase__ : Union[str, Any] ):
"""simple docstring"""
self._get_dirs()
__SCREAMING_SNAKE_CASE : Union[str, Any] = PurePosixPath(path.strip("""/""" ) )
__SCREAMING_SNAKE_CASE : Dict = {}
for p, f in self.dir_cache.items():
__SCREAMING_SNAKE_CASE : str = PurePosixPath(p.strip("""/""" ) )
__SCREAMING_SNAKE_CASE : Dict = p.parent
if root == path:
__SCREAMING_SNAKE_CASE : int = f
__SCREAMING_SNAKE_CASE : int = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out ) | 578 | 1 |
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def __lowercase ( _a , _a ):
snake_case_ : Optional[int] = []
for part_id in partition_order:
snake_case_ : Any = df.where(f"SPARK_PARTITION_ID() = {part_id}" ).collect()
for row_idx, row in enumerate(_a ):
expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def __lowercase ( ):
snake_case_ : List[Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
snake_case_ : Optional[Any] = spark.range(100 ).repartition(1 )
snake_case_ : Tuple = Spark(_a )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def __lowercase ( ):
snake_case_ : Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
snake_case_ : str = spark.range(10 ).repartition(2 )
snake_case_ : int = [1, 0]
snake_case_ : List[Any] = _generate_iterable_examples(_a , _a ) # Reverse the partitions.
snake_case_ : List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(_a , _a )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
snake_case_, snake_case_ : List[str] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowercase ( ):
snake_case_ : List[str] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
snake_case_ : List[Any] = spark.range(10 ).repartition(1 )
snake_case_ : List[Any] = SparkExamplesIterable(_a )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(_a ):
assert row_id == f"0_{i}"
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def __lowercase ( ):
snake_case_ : str = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
snake_case_ : Any = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
snake_case_ : Dict = lambda _a : x.reverse()
snake_case_ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(_a , [2, 1, 0] )
snake_case_ : Tuple = SparkExamplesIterable(_a ).shuffle_data_sources(_a )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(_a ):
snake_case_, snake_case_ : Dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowercase ( ):
snake_case_ : List[Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
snake_case_ : Any = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
snake_case_ : Any = SparkExamplesIterable(_a ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
snake_case_ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(_a , [0, 2] )
for i, (row_id, row_dict) in enumerate(_a ):
snake_case_, snake_case_ : List[Any] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
snake_case_ : int = SparkExamplesIterable(_a ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
snake_case_ : Tuple = _get_expected_row_ids_and_row_dicts_for_partition_order(_a , [1, 3] )
for i, (row_id, row_dict) in enumerate(_a ):
snake_case_, snake_case_ : Optional[int] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowercase ( ):
snake_case_ : List[Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
snake_case_ : Tuple = spark.range(100 ).repartition(1 )
snake_case_ : Dict = Spark(_a )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 485 |
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
lowercase__ : List[str] = {
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
def __lowercase ( _a ):
snake_case_ : List[str] = {}
state_dict.pop('''pixel_mean''' , _a )
state_dict.pop('''pixel_std''' , _a )
snake_case_ : Union[str, Any] = r'''.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'''
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
snake_case_ : Optional[int] = key.replace(_a , _a )
if re.match(_a , _a ):
snake_case_ : Union[str, Any] = int(re.match(_a , _a ).group(2 ) )
if layer_nb == 0:
snake_case_ : Optional[int] = key.replace('''layers.0''' , '''proj_in''' )
elif layer_nb == 1:
snake_case_ : Union[str, Any] = key.replace('''layers.1''' , '''layers.0''' )
elif layer_nb == 2:
snake_case_ : List[Any] = key.replace('''layers.2''' , '''proj_out''' )
snake_case_ : Optional[Any] = value
snake_case_ : Tuple = model_state_dict[
'''prompt_encoder.shared_embedding.positional_embedding'''
]
return model_state_dict
def __lowercase ( _a , _a , _a , _a="ybelkada/segment-anything" ):
snake_case_ : Optional[Any] = hf_hub_download(_a , f"checkpoints/{model_name}.pth" )
if "sam_vit_b" in model_name:
snake_case_ : Tuple = SamConfig()
elif "sam_vit_l" in model_name:
snake_case_ : Optional[Any] = SamVisionConfig(
hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
snake_case_ : Union[str, Any] = SamConfig(
vision_config=_a , )
elif "sam_vit_h" in model_name:
snake_case_ : Tuple = SamVisionConfig(
hidden_size=1_280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
snake_case_ : List[str] = SamConfig(
vision_config=_a , )
snake_case_ : Tuple = torch.load(_a , map_location='''cpu''' )
snake_case_ : Optional[Any] = replace_keys(_a )
snake_case_ : Any = SamImageProcessor()
snake_case_ : Optional[Any] = SamProcessor(image_processor=_a )
snake_case_ : Tuple = SamModel(_a )
hf_model.load_state_dict(_a )
snake_case_ : Tuple = hf_model.to('''cuda''' )
snake_case_ : Union[str, Any] = '''https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'''
snake_case_ : Union[str, Any] = Image.open(requests.get(_a , stream=_a ).raw ).convert('''RGB''' )
snake_case_ : Tuple = [[[400, 650]]]
snake_case_ : List[str] = [[1]]
snake_case_ : Optional[int] = processor(images=np.array(_a ) , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
snake_case_ : Optional[Any] = hf_model(**_a )
snake_case_ : Any = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_8902_5115_9668
snake_case_ : Optional[Any] = processor(
images=np.array(_a ) , input_points=_a , input_labels=_a , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
snake_case_ : Optional[Any] = hf_model(**_a )
snake_case_ : Tuple = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9712_6030_9219_3604
snake_case_ : Tuple = ((75, 275, 1_725, 850),)
snake_case_ : Optional[Any] = processor(images=np.array(_a ) , input_boxes=_a , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
snake_case_ : Dict = hf_model(**_a )
snake_case_ : Any = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8686_0156_0592_6514
# Test with 2 points and 1 image.
snake_case_ : Union[str, Any] = [[[400, 650], [800, 650]]]
snake_case_ : Optional[int] = [[1, 1]]
snake_case_ : Tuple = processor(
images=np.array(_a ) , input_points=_a , input_labels=_a , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
snake_case_ : Dict = hf_model(**_a )
snake_case_ : Dict = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9936_0477_9243_4692
if __name__ == "__main__":
lowercase__ : Optional[int] = argparse.ArgumentParser()
lowercase__ : Any = ['''sam_vit_b_01ec64''', '''sam_vit_h_4b8939''', '''sam_vit_l_0b3195''']
parser.add_argument(
'''--model_name''',
default='''sam_vit_h_4b8939''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
parser.add_argument(
'''--model_hub_id''',
default='''ybelkada/segment-anything''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
lowercase__ : Tuple = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 485 | 1 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
__UpperCAmelCase = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def snake_case_ (__A : List[str] , __A : Optional[Any] , __A : Dict=None ) -> Union[str, Any]:
if rng is None:
__lowerCAmelCase : Union[str, Any] = random.Random()
__lowerCAmelCase : Tuple = 1
for dim in shape:
total_dims *= dim
__lowerCAmelCase : Optional[Any] = []
for _ in range(__A ):
values.append(rng.randint(0 , vocab_size - 1 ) )
__lowerCAmelCase : Dict = np.array(__A , dtype=jnp.intaa ).reshape(__A )
return output
def snake_case_ (__A : Union[str, Any] , __A : str=None ) -> Union[str, Any]:
__lowerCAmelCase : Tuple = ids_tensor(__A , vocab_size=2 , rng=__A )
# make sure that at least one token is attended to for each batch
__lowerCAmelCase : int = 1
return attn_mask
@require_flax
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowerCamelCase : Optional[Any] =None
lowerCamelCase : Optional[int] =()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase ,__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
__lowerCAmelCase : List[str] = 2
__lowerCAmelCase : List[str] = inputs["""input_ids"""].shape[-1] // 2
__lowerCAmelCase : Dict = inputs["""input_ids"""][:max_batch_size, :sequence_length]
__lowerCAmelCase : Optional[Any] = jnp.ones_like(lowerCAmelCase )
__lowerCAmelCase : Any = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
__lowerCAmelCase : Dict = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
__lowerCAmelCase : Tuple = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : Optional[Any] = self._get_input_ids_and_config()
__lowerCAmelCase : str = False
__lowerCAmelCase : Tuple = max_length
__lowerCAmelCase : Any = 0
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : Union[str, Any] = model_class(lowerCAmelCase )
__lowerCAmelCase : Union[str, Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCAmelCase : Optional[Any] = getattr(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : List[str] = pt_model_class(lowerCAmelCase ).eval()
__lowerCAmelCase : Optional[int] = load_flax_weights_in_pytorch_model(lowerCAmelCase , flax_model.params )
__lowerCAmelCase : Dict = flax_model.generate(lowerCAmelCase ).sequences
__lowerCAmelCase : Tuple = pt_model.generate(torch.tensor(lowerCAmelCase , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
__lowerCAmelCase : List[str] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : Optional[Any] = self._get_input_ids_and_config()
__lowerCAmelCase : List[str] = False
__lowerCAmelCase : Union[str, Any] = max_length
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : str = model_class(lowerCAmelCase )
__lowerCAmelCase : Optional[int] = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
__lowerCAmelCase : Optional[Any] = jit(model.generate )
__lowerCAmelCase : int = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : str = self._get_input_ids_and_config()
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : str = max_length
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : Any = model_class(lowerCAmelCase )
__lowerCAmelCase : Any = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
__lowerCAmelCase : Union[str, Any] = jit(model.generate )
__lowerCAmelCase : Dict = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : List[str] = self._get_input_ids_and_config()
__lowerCAmelCase : Union[str, Any] = False
__lowerCAmelCase : Tuple = max_length
__lowerCAmelCase : int = 2
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : Tuple = model_class(lowerCAmelCase )
__lowerCAmelCase : List[str] = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
__lowerCAmelCase : Dict = jit(model.generate )
__lowerCAmelCase : Tuple = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
"""simple docstring"""
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : Union[str, Any] = self._get_input_ids_and_config()
__lowerCAmelCase : Dict = False
__lowerCAmelCase : Dict = max_length
__lowerCAmelCase : Tuple = 2
__lowerCAmelCase : List[str] = 2
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : Optional[Any] = model_class(lowerCAmelCase )
__lowerCAmelCase : List[Any] = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : Optional[int] = self._get_input_ids_and_config()
__lowerCAmelCase : List[Any] = True
__lowerCAmelCase : Optional[int] = max_length
__lowerCAmelCase : int = 0.8
__lowerCAmelCase : Union[str, Any] = 10
__lowerCAmelCase : Optional[Any] = 0.3
__lowerCAmelCase : Optional[Any] = 1
__lowerCAmelCase : List[Any] = 8
__lowerCAmelCase : str = 9
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : Union[str, Any] = model_class(lowerCAmelCase )
__lowerCAmelCase : Union[str, Any] = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
__lowerCAmelCase : Optional[int] = jit(model.generate )
__lowerCAmelCase : Any = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : List[Any] = self._get_input_ids_and_config()
__lowerCAmelCase : Tuple = max_length
__lowerCAmelCase : str = 1
__lowerCAmelCase : str = 8
__lowerCAmelCase : Dict = 9
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : Tuple = model_class(lowerCAmelCase )
__lowerCAmelCase : Optional[int] = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
__lowerCAmelCase : Optional[int] = jit(model.generate )
__lowerCAmelCase : Any = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : Dict = self._get_input_ids_and_config()
__lowerCAmelCase : Dict = max_length
__lowerCAmelCase : Tuple = 2
__lowerCAmelCase : Optional[int] = 1
__lowerCAmelCase : Optional[Any] = 8
__lowerCAmelCase : List[str] = 9
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : Union[str, Any] = model_class(lowerCAmelCase )
__lowerCAmelCase : str = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
__lowerCAmelCase : Any = jit(model.generate )
__lowerCAmelCase : str = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : Dict = self._get_input_ids_and_config()
# pad attention mask on the left
__lowerCAmelCase : List[str] = attention_mask.at[(0, 0)].set(0 )
__lowerCAmelCase : Optional[Any] = False
__lowerCAmelCase : List[str] = max_length
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : Optional[Any] = model_class(lowerCAmelCase )
__lowerCAmelCase : List[Any] = model.generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
__lowerCAmelCase : str = jit(model.generate )
__lowerCAmelCase : Union[str, Any] = jit_generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : Optional[int] = self._get_input_ids_and_config()
# pad attention mask on the left
__lowerCAmelCase : Any = attention_mask.at[(0, 0)].set(0 )
__lowerCAmelCase : Any = True
__lowerCAmelCase : Optional[int] = max_length
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : Union[str, Any] = model_class(lowerCAmelCase )
__lowerCAmelCase : Union[str, Any] = model.generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
__lowerCAmelCase : List[str] = jit(model.generate )
__lowerCAmelCase : List[str] = jit_generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : int = self._get_input_ids_and_config()
# pad attention mask on the left
__lowerCAmelCase : Optional[int] = attention_mask.at[(0, 0)].set(0 )
__lowerCAmelCase : Optional[int] = 2
__lowerCAmelCase : Union[str, Any] = max_length
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : Tuple = model_class(lowerCAmelCase )
__lowerCAmelCase : str = model.generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
__lowerCAmelCase : int = jit(model.generate )
__lowerCAmelCase : Tuple = jit_generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
"""simple docstring"""
__lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" )
__lowerCAmelCase : Tuple = FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
__lowerCAmelCase : Optional[int] = """Hello world"""
__lowerCAmelCase : List[Any] = tokenizer(lowerCAmelCase , return_tensors="""np""" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(lowerCAmelCase , """do_samples""" ):
model.generate(lowerCAmelCase , do_samples=lowerCAmelCase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(lowerCAmelCase , """foo""" ):
__lowerCAmelCase : int = {"""foo""": """bar"""}
model.generate(lowerCAmelCase , **lowerCAmelCase )
| 651 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__UpperCAmelCase = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__UpperCAmelCase = logging.getLogger()
def snake_case_ () -> Optional[Any]:
__lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("""-f""" )
__lowerCAmelCase : Dict = parser.parse_args()
return args.f
def snake_case_ (__A : Dict , __A : List[str]="eval" ) -> int:
__lowerCAmelCase : int = os.path.join(__A , f'''{split}_results.json''' )
if os.path.exists(__A ):
with open(__A , """r""" ) as f:
return json.load(__A )
raise ValueError(f'''can\'t find {path}''' )
__UpperCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Tuple = self.get_auto_remove_tmp_dir()
__lowerCAmelCase : Optional[Any] = f'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(lowerCAmelCase , """argv""" , lowerCAmelCase ):
run_flax_glue.main()
__lowerCAmelCase : Dict = get_results(lowerCAmelCase )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = self.get_auto_remove_tmp_dir()
__lowerCAmelCase : Any = f'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(lowerCAmelCase , """argv""" , lowerCAmelCase ):
run_clm_flax.main()
__lowerCAmelCase : int = get_results(lowerCAmelCase )
self.assertLess(result["""eval_perplexity"""] , 1_00 )
@slow
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Tuple = self.get_auto_remove_tmp_dir()
__lowerCAmelCase : int = f'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
with patch.object(lowerCAmelCase , """argv""" , lowerCAmelCase ):
run_summarization_flax.main()
__lowerCAmelCase : Union[str, Any] = get_results(lowerCAmelCase , split="""test""" )
self.assertGreaterEqual(result["""test_rouge1"""] , 10 )
self.assertGreaterEqual(result["""test_rouge2"""] , 2 )
self.assertGreaterEqual(result["""test_rougeL"""] , 7 )
self.assertGreaterEqual(result["""test_rougeLsum"""] , 7 )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Tuple = self.get_auto_remove_tmp_dir()
__lowerCAmelCase : List[str] = f'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
with patch.object(lowerCAmelCase , """argv""" , lowerCAmelCase ):
run_mlm_flax.main()
__lowerCAmelCase : List[Any] = get_results(lowerCAmelCase )
self.assertLess(result["""eval_perplexity"""] , 42 )
@slow
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = self.get_auto_remove_tmp_dir()
__lowerCAmelCase : List[str] = f'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(lowerCAmelCase , """argv""" , lowerCAmelCase ):
run_ta_mlm_flax.main()
__lowerCAmelCase : Union[str, Any] = get_results(lowerCAmelCase )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.42 )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowerCAmelCase : List[Any] = 7 if get_gpu_count() > 1 else 2
__lowerCAmelCase : Optional[int] = self.get_auto_remove_tmp_dir()
__lowerCAmelCase : List[Any] = f'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
with patch.object(lowerCAmelCase , """argv""" , lowerCAmelCase ):
run_flax_ner.main()
__lowerCAmelCase : Dict = get_results(lowerCAmelCase )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
self.assertGreaterEqual(result["""eval_f1"""] , 0.3 )
@slow
def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.get_auto_remove_tmp_dir()
__lowerCAmelCase : List[str] = f'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
with patch.object(lowerCAmelCase , """argv""" , lowerCAmelCase ):
run_qa.main()
__lowerCAmelCase : Union[str, Any] = get_results(lowerCAmelCase )
self.assertGreaterEqual(result["""eval_f1"""] , 30 )
self.assertGreaterEqual(result["""eval_exact"""] , 30 )
| 651 | 1 |
"""simple docstring"""
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class lowerCAmelCase__ :
def __init__( self : Any , _lowerCamelCase : Tuple ):
if isinstance(_lowerCamelCase , _lowerCamelCase ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
_snake_case = deepcopy(_lowerCamelCase )
elif os.path.exists(_lowerCamelCase ):
with io.open(_lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f:
_snake_case = json.load(_lowerCamelCase )
else:
try:
_snake_case = baseaa.urlsafe_baadecode(_lowerCamelCase ).decode('''utf-8''' )
_snake_case = json.loads(_lowerCamelCase )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''' )
_snake_case = config
self.set_stage_and_offload()
def lowercase ( self : Union[str, Any] ):
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
_snake_case = self.get_value('''zero_optimization.stage''' , -1 )
# offload
_snake_case = False
if self.is_zeroa() or self.is_zeroa():
_snake_case = set(['''cpu''', '''nvme'''] )
_snake_case = set(
[
self.get_value('''zero_optimization.offload_optimizer.device''' ),
self.get_value('''zero_optimization.offload_param.device''' ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
_snake_case = True
def lowercase ( self : Union[str, Any] , _lowerCamelCase : Tuple ):
_snake_case = self.config
# find the config node of interest if it exists
_snake_case = ds_key_long.split('''.''' )
_snake_case = nodes.pop()
for node in nodes:
_snake_case = config.get(_lowerCamelCase )
if config is None:
return None, ds_key
return config, ds_key
def lowercase ( self : int , _lowerCamelCase : int , _lowerCamelCase : List[Any]=None ):
_snake_case , _snake_case = self.find_config_node(_lowerCamelCase )
if config is None:
return default
return config.get(_lowerCamelCase , _lowerCamelCase )
def lowercase ( self : int , _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple=False ):
_snake_case = self.config
# find the config node of interest if it exists
_snake_case = ds_key_long.split('''.''' )
for node in nodes:
_snake_case = config
_snake_case = config.get(_lowerCamelCase )
if config is None:
if must_exist:
raise ValueError(f'''Can\'t find {ds_key_long} entry in the config: {self.config}''' )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(_lowerCamelCase )
def lowercase ( self : List[Any] , _lowerCamelCase : List[str] ):
_snake_case = self.get_value(_lowerCamelCase )
return False if value is None else bool(_lowerCamelCase )
def lowercase ( self : List[str] , _lowerCamelCase : str ):
_snake_case = self.get_value(_lowerCamelCase )
return False if value is None else not bool(_lowerCamelCase )
def lowercase ( self : Optional[int] ):
return self._stage == 2
def lowercase ( self : Optional[int] ):
return self._stage == 3
def lowercase ( self : Union[str, Any] ):
return self._offload
class lowerCAmelCase__ :
def __init__( self : Any , _lowerCamelCase : Tuple ):
_snake_case = engine
def lowercase ( self : Union[str, Any] , _lowerCamelCase : Dict , **_lowerCamelCase : Dict ):
# runs backpropagation and handles mixed precision
self.engine.backward(_lowerCamelCase , **_lowerCamelCase )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class lowerCAmelCase__ ( A_ ):
def __init__( self : str , _lowerCamelCase : str ):
super().__init__(_lowerCamelCase , device_placement=_lowerCamelCase , scaler=_lowerCamelCase )
_snake_case = hasattr(self.optimizer , '''overflow''' )
def lowercase ( self : Union[str, Any] , _lowerCamelCase : List[Any]=None ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def lowercase ( self : Tuple ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def lowercase ( self : Dict ):
if self.__has_overflow__:
return self.optimizer.overflow
return False
class lowerCAmelCase__ ( A_ ):
def __init__( self : Any , _lowerCamelCase : Dict , _lowerCamelCase : Tuple ):
super().__init__(_lowerCamelCase , _lowerCamelCase )
def lowercase ( self : Any ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class lowerCAmelCase__ :
def __init__( self : Dict , _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any]=0.0_0_1 , _lowerCamelCase : List[str]=0 , **_lowerCamelCase : List[Any] ):
_snake_case = params
_snake_case = lr
_snake_case = weight_decay
_snake_case = kwargs
class lowerCAmelCase__ :
def __init__( self : str , _lowerCamelCase : Any , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Any=0 , **_lowerCamelCase : str ):
_snake_case = optimizer
_snake_case = total_num_steps
_snake_case = warmup_num_steps
_snake_case = kwargs
| 430 |
"""simple docstring"""
from __future__ import annotations
UpperCAmelCase__ = '#'
class lowerCAmelCase__ :
def __init__( self : List[Any] ):
_snake_case = {}
def lowercase ( self : str , _lowerCamelCase : str ):
_snake_case = self._trie
for char in text:
if char not in trie:
_snake_case = {}
_snake_case = trie[char]
_snake_case = True
def lowercase ( self : Dict , _lowerCamelCase : str ):
_snake_case = self._trie
for char in prefix:
if char in trie:
_snake_case = trie[char]
else:
return []
return self._elements(_lowerCamelCase )
def lowercase ( self : Union[str, Any] , _lowerCamelCase : dict ):
_snake_case = []
for c, v in d.items():
_snake_case = [''' '''] if c == END else [(c + s) for s in self._elements(_lowerCamelCase )]
result.extend(_lowerCamelCase )
return tuple(_lowerCamelCase )
UpperCAmelCase__ = Trie()
UpperCAmelCase__ = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
trie.insert_word(word)
def _UpperCAmelCase ( __lowerCamelCase : str ) -> tuple:
_snake_case = trie.find_word(__lowerCamelCase )
return tuple(string + word for word in suffixes )
def _UpperCAmelCase ( ) -> None:
print(autocomplete_using_trie('''de''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 430 | 1 |
def A__ ( lowerCamelCase , lowerCamelCase ) -> str:
if a < 0 or b < 0:
raise ValueError("""the value of both inputs must be positive""" )
UpperCamelCase_: Tuple = str(bin(lowerCamelCase ) )[2:] # remove the leading "0b"
UpperCamelCase_: Any = str(bin(lowerCamelCase ) )[2:]
UpperCamelCase_: str = max(len(lowerCamelCase ) , len(lowerCamelCase ) )
return "0b" + "".join(
str(int("""1""" in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(lowerCamelCase ) , b_binary.zfill(lowerCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 548 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def A__ ( lowerCamelCase , lowerCamelCase=() , lowerCamelCase=None , lowerCamelCase="no" , lowerCamelCase="29500" ) -> List[Any]:
UpperCamelCase_: Any = False
UpperCamelCase_: int = False
if any(key.startswith("""KAGGLE""" ) for key in os.environ.keys() ):
UpperCamelCase_: str = True
elif "IPython" in sys.modules:
UpperCamelCase_: Tuple = """google.colab""" in str(sys.modules["""IPython"""].get_ipython() )
try:
UpperCamelCase_: Union[str, Any] = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F'''Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get("""TPU_NAME""" , lowerCamelCase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside """
"""your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if num_processes is None:
UpperCamelCase_: str = 8
UpperCamelCase_: int = PrepareForLaunch(lowerCamelCase , distributed_type="""TPU""" )
print(F'''Launching a training on {num_processes} TPU cores.''' )
xmp.spawn(lowerCamelCase , args=lowerCamelCase , nprocs=lowerCamelCase , start_method="""fork""" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on one CPU.""" )
function(*lowerCamelCase )
else:
if num_processes is None:
raise ValueError(
"""You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.""" )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized """
"""inside your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if torch.cuda.is_initialized():
raise ValueError(
"""To launch a multi-GPU training from your notebook, you need to avoid running any instruction """
"""using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA """
"""function.""" )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=lowerCamelCase , master_addr="""127.0.01""" , master_port=lowerCamelCase , mixed_precision=lowerCamelCase ):
UpperCamelCase_: Optional[int] = PrepareForLaunch(lowerCamelCase , distributed_type="""MULTI_GPU""" )
print(F'''Launching training on {num_processes} GPUs.''' )
try:
start_processes(lowerCamelCase , args=lowerCamelCase , nprocs=lowerCamelCase , start_method="""fork""" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"""CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. """
"""This likely stems from an outside import causing issues once the `notebook_launcher()` is called. """
"""Please review your imports and test them when running the `notebook_launcher()` to identify """
"""which one is problematic.""" ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
UpperCamelCase_: List[Any] = """1"""
print("""Launching training on MPS.""" )
elif torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on CPU.""" )
function(*lowerCamelCase )
def A__ ( lowerCamelCase , lowerCamelCase=() , lowerCamelCase=2 ) -> List[str]:
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=lowerCamelCase , master_addr="""127.0.01""" , master_port="""29500""" , accelerate_mixed_precision="""no""" , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu="""yes""" , ):
UpperCamelCase_: str = PrepareForLaunch(lowerCamelCase , debug=lowerCamelCase )
start_processes(lowerCamelCase , args=lowerCamelCase , nprocs=lowerCamelCase , start_method="""fork""" )
| 548 | 1 |
'''simple docstring'''
from ....utils import logging
A : str = logging.get_logger(__name__)
class lowerCAmelCase_ ( a_ ):
def __init__( self : int, _snake_case : Dict, _snake_case : Optional[int]=None, _snake_case : str=2_048 ):
'''simple docstring'''
snake_case : int =config.__dict__
snake_case : Dict =modal_hidden_size
if num_labels:
snake_case : List[str] =num_labels
| 136 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Any = logging.get_logger(__name__)
A : Optional[Any] = {}
class lowerCAmelCase_ ( a_ ):
__UpperCAmelCase = 'llama'
__UpperCAmelCase = ['past_key_values']
def __init__( self : Dict, _snake_case : Union[str, Any]=32_000, _snake_case : int=4_096, _snake_case : Dict=11_008, _snake_case : Union[str, Any]=32, _snake_case : int=32, _snake_case : List[Any]=None, _snake_case : Union[str, Any]="silu", _snake_case : Dict=2_048, _snake_case : List[str]=0.02, _snake_case : int=1E-6, _snake_case : Any=True, _snake_case : Tuple=0, _snake_case : Tuple=1, _snake_case : List[Any]=2, _snake_case : int=1, _snake_case : Optional[int]=False, _snake_case : Union[str, Any]=None, **_snake_case : List[str], ):
'''simple docstring'''
snake_case : List[Any] =vocab_size
snake_case : Union[str, Any] =max_position_embeddings
snake_case : Union[str, Any] =hidden_size
snake_case : Optional[Any] =intermediate_size
snake_case : List[str] =num_hidden_layers
snake_case : Optional[int] =num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
snake_case : Optional[Any] =num_attention_heads
snake_case : List[Any] =num_key_value_heads
snake_case : Any =hidden_act
snake_case : Optional[Any] =initializer_range
snake_case : List[str] =rms_norm_eps
snake_case : Optional[Any] =pretraining_tp
snake_case : Dict =use_cache
snake_case : Union[str, Any] =rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_snake_case, bos_token_id=_snake_case, eos_token_id=_snake_case, tie_word_embeddings=_snake_case, **_snake_case, )
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling, _snake_case ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
f'''got {self.rope_scaling}''' )
snake_case : Tuple =self.rope_scaling.get('''type''', _snake_case )
snake_case : str =self.rope_scaling.get('''factor''', _snake_case )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(_snake_case, _snake_case ) or rope_scaling_factor <= 1.0:
raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
| 136 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
snake_case_ = {
"configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
"VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VivitModel",
"VivitPreTrainedModel",
"VivitForVideoClassification",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
snake_case_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 164 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCamelCase ( __snake_case ):
"""simple docstring"""
lowerCAmelCase = (IPNDMScheduler,)
lowerCAmelCase = (('num_inference_steps', 5_0),)
def _UpperCAmelCase ( self , **a__ ) -> List[str]:
A = {"""num_train_timesteps""": 1000}
config.update(**a__ )
return config
def _UpperCAmelCase ( self , a__=0 , **a__ ) -> Tuple:
A = dict(self.forward_default_kwargs )
A = kwargs.pop("""num_inference_steps""" , a__ )
A = self.dummy_sample
A = 0.1 * sample
A = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A = self.get_scheduler_config(**a__ )
A = scheduler_class(**a__ )
scheduler.set_timesteps(a__ )
# copy over dummy past residuals
A = dummy_past_residuals[:]
if time_step is None:
A = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a__ )
A = scheduler_class.from_pretrained(a__ )
new_scheduler.set_timesteps(a__ )
# copy over dummy past residuals
A = dummy_past_residuals[:]
A = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
A = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
A = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _UpperCAmelCase ( self ) -> Any:
pass
def _UpperCAmelCase ( self , a__=0 , **a__ ) -> int:
A = dict(self.forward_default_kwargs )
A = kwargs.pop("""num_inference_steps""" , a__ )
A = self.dummy_sample
A = 0.1 * sample
A = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A = self.get_scheduler_config()
A = scheduler_class(**a__ )
scheduler.set_timesteps(a__ )
# copy over dummy past residuals (must be after setting timesteps)
A = dummy_past_residuals[:]
if time_step is None:
A = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a__ )
A = scheduler_class.from_pretrained(a__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(a__ )
# copy over dummy past residual (must be after setting timesteps)
A = dummy_past_residuals[:]
A = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
A = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
A = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _UpperCAmelCase ( self , **a__ ) -> List[Any]:
A = self.scheduler_classes[0]
A = self.get_scheduler_config(**a__ )
A = scheduler_class(**a__ )
A = 10
A = self.dummy_model()
A = self.dummy_sample_deter
scheduler.set_timesteps(a__ )
for i, t in enumerate(scheduler.timesteps ):
A = model(a__ , a__ )
A = scheduler.step(a__ , a__ , a__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
A = model(a__ , a__ )
A = scheduler.step(a__ , a__ , a__ ).prev_sample
return sample
def _UpperCAmelCase ( self ) -> Any:
A = dict(self.forward_default_kwargs )
A = kwargs.pop("""num_inference_steps""" , a__ )
for scheduler_class in self.scheduler_classes:
A = self.get_scheduler_config()
A = scheduler_class(**a__ )
A = self.dummy_sample
A = 0.1 * sample
if num_inference_steps is not None and hasattr(a__ , """set_timesteps""" ):
scheduler.set_timesteps(a__ )
elif num_inference_steps is not None and not hasattr(a__ , """set_timesteps""" ):
A = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A = dummy_past_residuals[:]
A = scheduler.timesteps[5]
A = scheduler.timesteps[6]
A = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
A = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
A = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _UpperCAmelCase ( self ) -> Tuple:
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=a__ , time_step=a__ )
def _UpperCAmelCase ( self ) -> Any:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=a__ , time_step=a__ )
def _UpperCAmelCase ( self ) -> int:
A = self.full_loop()
A = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 254_0529 ) < 10
| 641 | 0 |
'''simple docstring'''
def _lowercase ( lowercase__ = 1_0_0_0 ):
__lowerCAmelCase : Tuple = 1, 1
__lowerCAmelCase : Optional[int] = 2
while True:
__lowerCAmelCase : Optional[int] = 0
__lowerCAmelCase : Tuple = fa + fa
__lowerCAmelCase : List[Any] = fa, f
index += 1
for _ in str(lowercase__ ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 702 |
from math import factorial
def _lowercase ( lowercase__ , lowercase__ ):
# If either of the conditions are true, the function is being asked
# to calculate a factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError('''Please enter positive integers for n and k where n >= k''' )
return factorial(lowercase__ ) // (factorial(lowercase__ ) * factorial(n - k ))
if __name__ == "__main__":
print(
"The number of five-card hands possible from a standard",
F"fifty-two card deck is: {combinations(52, 5)}\n",
)
print(
"If a class of 40 students must be arranged into groups of",
F"4 for group projects, there are {combinations(40, 4)} ways",
"to arrange them.\n",
)
print(
"If 10 teams are competing in a Formula One race, there",
F"are {combinations(10, 3)} ways that first, second and",
"third place can be awarded.",
)
| 583 | 0 |
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int:
"""simple docstring"""
a = 1
for i in range(1, num + 1 ):
fact *= i
return fact
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int:
"""simple docstring"""
a = 0
while number > 0:
a = number % 1_0
sum_of_digits += last_digit
a = number // 1_0 # Removing the last_digit from the given number
return sum_of_digits
def SCREAMING_SNAKE_CASE__ ( snake_case_ = 1_0_0 ) -> int:
"""simple docstring"""
a = factorial(snake_case_ )
a = split_and_add(snake_case_ )
return result
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 387 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class lowerCamelCase_ ( unittest.TestCase ):
def __init__( self : int ,__lowerCamelCase : List[str] ,__lowerCamelCase : Optional[int]=7 ,__lowerCamelCase : Optional[int]=3 ,__lowerCamelCase : Any=18 ,__lowerCamelCase : List[Any]=30 ,__lowerCamelCase : Optional[Any]=4_00 ,__lowerCamelCase : Dict=True ,__lowerCamelCase : Optional[int]=None ,__lowerCamelCase : int=True ,__lowerCamelCase : List[str]=False ,__lowerCamelCase : Dict=True ,__lowerCamelCase : Dict=True ,__lowerCamelCase : Union[str, Any]=[0.5, 0.5, 0.5] ,__lowerCamelCase : Optional[int]=[0.5, 0.5, 0.5] ,):
'''simple docstring'''
a = parent
a = batch_size
a = num_channels
a = image_size
a = min_resolution
a = max_resolution
a = do_resize
a = size if size is not None else {'''height''': 18, '''width''': 20}
a = do_thumbnail
a = do_align_axis
a = do_pad
a = do_normalize
a = image_mean
a = image_std
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowerCamelCase_ ( a_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = DonutImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
a = DonutImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase ,'''do_resize''' ) )
self.assertTrue(hasattr(__lowerCamelCase ,'''size''' ) )
self.assertTrue(hasattr(__lowerCamelCase ,'''do_thumbnail''' ) )
self.assertTrue(hasattr(__lowerCamelCase ,'''do_align_long_axis''' ) )
self.assertTrue(hasattr(__lowerCamelCase ,'''do_pad''' ) )
self.assertTrue(hasattr(__lowerCamelCase ,'''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCamelCase ,'''image_mean''' ) )
self.assertTrue(hasattr(__lowerCamelCase ,'''image_std''' ) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'''height''': 18, '''width''': 20} )
a = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
a = self.image_processing_class.from_dict(self.image_processor_dict ,size=(42, 84) )
self.assertEqual(image_processor.size ,{'''height''': 84, '''width''': 42} )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
pass
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase ,Image.Image )
# Test not batched input
a = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
# Test batched
a = image_processing(__lowerCamelCase ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowerCamelCase ,numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase ,np.ndarray )
# Test not batched input
a = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
# Test batched
a = image_processing(__lowerCamelCase ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowerCamelCase ,torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase ,torch.Tensor )
# Test not batched input
a = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
# Test batched
a = image_processing(__lowerCamelCase ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
| 387 | 1 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
UpperCAmelCase : Any = {
'''vocab_file''': '''vocab.json''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
'''merges_file''': '''merges.txt''',
}
UpperCAmelCase : Optional[Any] = {
'''vocab_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'''
),
},
'''tokenizer_config_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'''
),
},
'''merges_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'''
),
},
}
UpperCAmelCase : Optional[Any] = '''</w>'''
UpperCAmelCase : Union[str, Any] = '''@@ '''
def _SCREAMING_SNAKE_CASE ( a ) -> Any:
__A : Tuple = set()
__A : Union[str, Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__A : List[str] = char
return pairs
# Speech2Text2 has no max input length
UpperCAmelCase : int = {'''facebook/s2t-wav2vec2-large-en-de''': 10_24}
class _A( __a ):
"""simple docstring"""
UpperCamelCase : str = VOCAB_FILES_NAMES
UpperCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : str = ['''input_ids''', '''attention_mask''']
def __init__( self , _A , _A="<s>" , _A="<pad>" , _A="</s>" , _A="<unk>" , _A=False , _A=None , **_A , ):
super().__init__(
unk_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , pad_token=snake_case__ , do_lower_case=snake_case__ , **snake_case__ , )
__A : Optional[int] = do_lower_case
with open(snake_case__ , encoding='utf-8' ) as vocab_handle:
__A : Optional[Any] = json.load(snake_case__ )
__A : int = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
__A : Union[str, Any] = None
__A : Any = None
else:
with open(snake_case__ , encoding='utf-8' ) as merges_handle:
__A : List[str] = merges_handle.read().split('\n' )[:-1]
__A : List[str] = [tuple(merge.split()[:2] ) for merge in merges]
__A : Optional[Any] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
__A : int = {}
@property
def UpperCAmelCase_ ( self ):
return len(self.decoder )
def UpperCAmelCase_ ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase_ ( self , _A ):
__A : Tuple = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
__A : Dict = get_pairs(snake_case__ )
if not pairs:
return token
while True:
__A : List[Any] = min(snake_case__ , key=lambda _A : self.bpe_ranks.get(snake_case__ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
__A , __A : List[Any] = bigram
__A : Union[str, Any] = []
__A : List[str] = 0
while i < len(snake_case__ ):
try:
__A : List[Any] = word.index(snake_case__ , snake_case__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__A : str = j
if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__A : Dict = tuple(snake_case__ )
__A : Union[str, Any] = new_word
if len(snake_case__ ) == 1:
break
else:
__A : List[Any] = get_pairs(snake_case__ )
__A : List[Any] = ' '.join(snake_case__ )
if word == "\n " + BPE_TOKEN_MERGES:
__A : Optional[Any] = '\n' + BPE_TOKEN_MERGES
if word.endswith(snake_case__ ):
__A : Tuple = word.replace(snake_case__ , '' )
__A : Tuple = word.replace(' ' , snake_case__ )
__A : Union[str, Any] = word
return word
def UpperCAmelCase_ ( self , _A ):
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
' that it can only be used for decoding, not for encoding.'
'Make sure to provide `merges.txt` file at instantiation to enable '
'encoding.' )
if self.do_lower_case:
__A : int = text.lower()
__A : Tuple = text.split()
__A : List[Any] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(snake_case__ ).split(' ' ) ) )
return split_tokens
def UpperCAmelCase_ ( self , _A ):
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def UpperCAmelCase_ ( self , _A ):
__A : List[Any] = self.decoder.get(snake_case__ , self.unk_token )
return result
def UpperCAmelCase_ ( self , _A ):
__A : Union[str, Any] = ' '.join(snake_case__ )
# make sure @@ tokens are concatenated
__A : Optional[int] = ''.join(string.split(snake_case__ ) )
return string
def UpperCAmelCase_ ( self , _A , _A = None ):
if not os.path.isdir(snake_case__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__A : Optional[int] = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
__A : Any = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(snake_case__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case__ , ensure_ascii=snake_case__ ) + '\n' )
__A : Union[str, Any] = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(snake_case__ , 'w' , encoding='utf-8' ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _A : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
__A : Optional[int] = token_index
writer.write(' '.join(snake_case__ ) + '\n' )
index += 1
return (vocab_file, merges_file)
| 706 |
import numpy as np
from PIL import Image
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> np.ndarray:
__A : Union[str, Any] = np.array(a )
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix' )
__A : List[Any] = 0
__A : Optional[Any] = 0
__A : List[Any] = 0
__A : Dict = 0
# compute the shape of the output matrix
__A : List[Any] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
__A : Optional[int] = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
__A : Tuple = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__A : List[str] = 0
__A : Union[str, Any] = 0
return updated_arr
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> np.ndarray:
__A : List[Any] = np.array(a )
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix' )
__A : Dict = 0
__A : str = 0
__A : Tuple = 0
__A : Optional[int] = 0
# compute the shape of the output matrix
__A : List[Any] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
__A : Any = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
__A : Tuple = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__A : Dict = 0
__A : int = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
UpperCAmelCase : int = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 77 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__UpperCamelCase = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["CLIPFeatureExtractor"]
__UpperCamelCase = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__lowerCamelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowerCamelCase : Union[str, Any] = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
```
"""
@dataclass
class lowerCAmelCase__ ( _lowerCAmelCase ):
A = 42
class lowerCAmelCase__ ( _lowerCAmelCase ):
def __init__( self : Dict , UpperCamelCase_ : PriorTransformer , UpperCamelCase_ : CLIPVisionModel , UpperCamelCase_ : CLIPImageProcessor , UpperCamelCase_ : HeunDiscreteScheduler , UpperCamelCase_ : ShapERenderer , ) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(
prior=UpperCamelCase_ , image_encoder=UpperCamelCase_ , image_processor=UpperCamelCase_ , scheduler=UpperCamelCase_ , renderer=UpperCamelCase_ , )
def __UpperCamelCase ( self : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : int ) -> Dict:
"""simple docstring"""
if latents is None:
lowerCamelCase_ : str = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
lowerCamelCase_ : List[str] = latents.to(UpperCamelCase_ )
lowerCamelCase_ : int = latents * scheduler.init_noise_sigma
return latents
def __UpperCamelCase ( self : Optional[Any] , UpperCamelCase_ : List[str]=0 ) -> Optional[int]:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
lowerCamelCase_ : Tuple = torch.device(F"""cuda:{gpu_id}""" )
lowerCamelCase_ : Tuple = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
@property
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(UpperCamelCase_ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def __UpperCamelCase ( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : str , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any] , ) -> int:
"""simple docstring"""
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , torch.Tensor ):
lowerCamelCase_ : Any = torch.cat(UpperCamelCase_ , axis=0 ) if image[0].ndim == 4 else torch.stack(UpperCamelCase_ , axis=0 )
if not isinstance(UpperCamelCase_ , torch.Tensor ):
lowerCamelCase_ : Any = self.image_processor(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 )
lowerCamelCase_ : str = image.to(dtype=self.image_encoder.dtype , device=UpperCamelCase_ )
lowerCamelCase_ : Union[str, Any] = self.image_encoder(UpperCamelCase_ )['''last_hidden_state''']
lowerCamelCase_ : Dict = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowerCamelCase_ : List[str] = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase_ : Dict = torch.zeros_like(UpperCamelCase_ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ : Dict = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(UpperCamelCase_ )
def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[PIL.Image.Image, List[PIL.Image.Image]] , UpperCamelCase_ : int = 1 , UpperCamelCase_ : int = 25 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : Optional[torch.FloatTensor] = None , UpperCamelCase_ : float = 4.0 , UpperCamelCase_ : int = 64 , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , ) -> Optional[Any]:
"""simple docstring"""
if isinstance(UpperCamelCase_ , PIL.Image.Image ):
lowerCamelCase_ : List[Any] = 1
elif isinstance(UpperCamelCase_ , torch.Tensor ):
lowerCamelCase_ : Union[str, Any] = image.shape[0]
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowerCamelCase_ : str = len(UpperCamelCase_ )
else:
raise ValueError(
F"""`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(UpperCamelCase_ )}""" )
lowerCamelCase_ : List[Any] = self._execution_device
lowerCamelCase_ : Optional[Any] = batch_size * num_images_per_prompt
lowerCamelCase_ : Optional[int] = guidance_scale > 1.0
lowerCamelCase_ : Any = self._encode_image(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# prior
self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ )
lowerCamelCase_ : Dict = self.scheduler.timesteps
lowerCamelCase_ : List[Any] = self.prior.config.num_embeddings
lowerCamelCase_ : Optional[int] = self.prior.config.embedding_dim
lowerCamelCase_ : Dict = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowerCamelCase_ : Tuple = latents.reshape(latents.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ : List[Any] = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ )
lowerCamelCase_ : Tuple = self.prior(
UpperCamelCase_ , timestep=UpperCamelCase_ , proj_embedding=UpperCamelCase_ , ).predicted_image_embedding
# remove the variance
lowerCamelCase_ , lowerCamelCase_ : List[Any] = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance is not None:
lowerCamelCase_ , lowerCamelCase_ : Optional[int] = noise_pred.chunk(2 )
lowerCamelCase_ : Tuple = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowerCamelCase_ : str = self.scheduler.step(
UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=UpperCamelCase_ )
lowerCamelCase_ : Dict = []
for i, latent in enumerate(UpperCamelCase_ ):
print()
lowerCamelCase_ : Optional[int] = self.renderer.decode(
latent[None, :] , UpperCamelCase_ , size=UpperCamelCase_ , ray_batch_size=4_096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(UpperCamelCase_ )
lowerCamelCase_ : Optional[Any] = torch.stack(UpperCamelCase_ )
if output_type not in ["np", "pil"]:
raise ValueError(F"""Only the output types `pil` and `np` are supported not output_type={output_type}""" )
lowerCamelCase_ : Union[str, Any] = images.cpu().numpy()
if output_type == "pil":
lowerCamelCase_ : str = [self.numpy_to_pil(UpperCamelCase_ ) for image in images]
# Offload last model to CPU
if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=UpperCamelCase_ )
| 501 | 0 |
'''simple docstring'''
import warnings
from .generation import TFGenerationMixin
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
warnings.warn(
'''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '''
'''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''' , lowerCamelCase , )
| 717 | '''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def __lowerCamelCase ( _UpperCamelCase : np.ndarray , _UpperCamelCase : tuple[int, int] , _UpperCamelCase : tuple[int, int] , _UpperCamelCase : bool , ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = grid.shape
UpperCAmelCase_ = [-1, 1, 0, 0]
UpperCAmelCase_ = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
UpperCAmelCase_ , UpperCAmelCase_ = [(0, source)], set()
UpperCAmelCase_ = np.full((rows, cols) , np.inf )
UpperCAmelCase_ = 0
UpperCAmelCase_ = np.empty((rows, cols) , dtype=_UpperCamelCase )
UpperCAmelCase_ = None
while queue:
((UpperCAmelCase_) , (UpperCAmelCase_)) = heappop(_UpperCamelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
UpperCAmelCase_ = []
while (x, y) != source:
path.append((x, y) )
UpperCAmelCase_ , UpperCAmelCase_ = predecessors[x, y]
path.append(_UpperCamelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(_UpperCamelCase ) ):
UpperCAmelCase_ , UpperCAmelCase_ = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
UpperCAmelCase_ = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(_UpperCamelCase , (dist + 1, (nx, ny)) )
UpperCAmelCase_ = dist + 1
UpperCAmelCase_ = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 | 0 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
_lowerCamelCase : Any = logging.getLogger(__name__)
_lowerCamelCase : Optional[int] = 50 # max width of layer names
_lowerCamelCase : Tuple = 70 # max width of quantizer names
def __a ( __lowerCAmelCase ) -> List[Any]:
SCREAMING_SNAKE_CASE : Optional[int] = parser.add_argument_group('quant_trainer arguments' )
group.add_argument('--wprec' , type=__lowerCAmelCase , default=8 , help='weight precision' )
group.add_argument('--aprec' , type=__lowerCAmelCase , default=8 , help='activation precision' )
group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' )
group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' )
group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' )
group.add_argument('--quant-disable-keyword' , type=__lowerCAmelCase , nargs='+' , help='disable quantizers by keyword' )
group.add_argument('--quant-disable-layer-module' , type=__lowerCAmelCase , help='disable quantizers by keyword under layer.' )
group.add_argument('--quant-enable-layer-module' , type=__lowerCAmelCase , help='enable quantizers by keyword under layer' )
group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' )
group.add_argument('--percentile' , default=__lowerCAmelCase , type=__lowerCAmelCase , help='percentile for PercentileCalibrator' )
group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' )
group.add_argument('--clip-gelu' , metavar='N' , type=__lowerCAmelCase , help='clip gelu output maximum value to N' )
group.add_argument(
'--recalibrate-weights' , action='store_true' , help=(
'recalibrate weight amaxes by taking the max of the weights.'
' amaxes will be computed with the current quantization granularity (axis).'
) , )
def __a ( __lowerCAmelCase ) -> Optional[Any]:
if args.calibrator == "max":
SCREAMING_SNAKE_CASE : Dict = 'max'
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError('Specify --percentile when using percentile calibrator' )
SCREAMING_SNAKE_CASE : List[Any] = 'histogram'
elif args.calibrator == "mse":
SCREAMING_SNAKE_CASE : Tuple = 'histogram'
else:
raise ValueError(F'''Invalid calibrator {args.calibrator}''' )
SCREAMING_SNAKE_CASE : Optional[int] = QuantDescriptor(num_bits=args.aprec , calib_method=__lowerCAmelCase )
SCREAMING_SNAKE_CASE : Tuple = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(__lowerCAmelCase )
quant_nn.QuantLinear.set_default_quant_desc_weight(__lowerCAmelCase )
def __a ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=False ) -> List[str]:
logger.info('Configuring Model for Quantization' )
logger.info(F'''using quantization package {pytorch_quantization.__file__}''' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(__lowerCAmelCase , ['embeddings'] , which='weight' , _disabled=__lowerCAmelCase )
if args.quant_disable:
set_quantizer_by_name(__lowerCAmelCase , [''] , _disabled=__lowerCAmelCase )
if args.quant_disable_keyword:
set_quantizer_by_name(__lowerCAmelCase , args.quant_disable_keyword , _disabled=__lowerCAmelCase )
if args.quant_disable_layer_module:
set_quantizer_by_name(__lowerCAmelCase , [r'layer.\d+.' + args.quant_disable_layer_module] , _disabled=__lowerCAmelCase )
if args.quant_enable_layer_module:
set_quantizer_by_name(__lowerCAmelCase , [r'layer.\d+.' + args.quant_enable_layer_module] , _disabled=__lowerCAmelCase )
if args.recalibrate_weights:
recalibrate_weights(__lowerCAmelCase )
if args.fuse_qkv:
fuse_qkv(__lowerCAmelCase , __lowerCAmelCase )
if args.clip_gelu:
clip_gelu(__lowerCAmelCase , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(__lowerCAmelCase )
def __a ( __lowerCAmelCase ) -> List[str]:
logger.info('Enabling Calibration' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F'''{name:80}: {module}''' )
def __a ( __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]:
logger.info('Loading calibrated amax' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('percentile' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(__lowerCAmelCase )
def __a ( __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]:
def fusea(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
for mod in [qq, qk, qv]:
if not hasattr(__lowerCAmelCase , '_amax' ):
print(' WARNING: NO AMAX BUFFER' )
return
SCREAMING_SNAKE_CASE : Union[str, Any] = qq._amax.detach().item()
SCREAMING_SNAKE_CASE : str = qk._amax.detach().item()
SCREAMING_SNAKE_CASE : str = qv._amax.detach().item()
SCREAMING_SNAKE_CASE : Dict = max(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
qq._amax.fill_(__lowerCAmelCase )
qk._amax.fill_(__lowerCAmelCase )
qv._amax.fill_(__lowerCAmelCase )
logger.info(F''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' )
for name, mod in model.named_modules():
if name.endswith('.attention.self' ):
logger.info(F'''FUSE_QKV: {name:{name_width}}''' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def __a ( __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
for name, mod in model.named_modules():
if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ):
SCREAMING_SNAKE_CASE : Dict = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=__lowerCAmelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = mod._input_quantizer._amax.data.detach().item()
logger.info(F'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' )
def __a ( __lowerCAmelCase ) -> Union[str, Any]:
for name, mod in model.named_modules():
if hasattr(__lowerCAmelCase , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None:
SCREAMING_SNAKE_CASE : str = mod.weight.shape[0]
SCREAMING_SNAKE_CASE : int = mod._weight_quantizer._amax.detach()
SCREAMING_SNAKE_CASE : List[str] = torch.ones(__lowerCAmelCase , dtype=amax.dtype , device=amax.device ) * amax
print(F'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def __a ( __lowerCAmelCase ) -> Dict:
for name, mod in model.named_modules():
if hasattr(__lowerCAmelCase , '_weight_quantizer' ):
if not hasattr(mod.weight_quantizer , '_amax' ):
print('RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
SCREAMING_SNAKE_CASE : Union[str, Any] = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
SCREAMING_SNAKE_CASE : str = set(range(len(mod.weight.size() ) ) ) - axis_set
SCREAMING_SNAKE_CASE : List[str] = pytorch_quantization.utils.reduce_amax(mod.weight , axis=__lowerCAmelCase , keepdims=__lowerCAmelCase ).detach()
logger.info(F'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
SCREAMING_SNAKE_CASE : Any = amax
def __a ( __lowerCAmelCase , __lowerCAmelCase=25 , __lowerCAmelCase=180 , __lowerCAmelCase=None ) -> List[Any]:
if ignore is None:
SCREAMING_SNAKE_CASE : List[str] = []
elif not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE : int = [ignore]
SCREAMING_SNAKE_CASE : Any = 0
for name, mod in model.named_modules():
if not hasattr(__lowerCAmelCase , 'weight' ):
continue
SCREAMING_SNAKE_CASE : Any = max(__lowerCAmelCase , len(__lowerCAmelCase ) )
for name, mod in model.named_modules():
SCREAMING_SNAKE_CASE : Any = getattr(__lowerCAmelCase , '_input_quantizer' , __lowerCAmelCase )
SCREAMING_SNAKE_CASE : Any = getattr(__lowerCAmelCase , '_weight_quantizer' , __lowerCAmelCase )
if not hasattr(__lowerCAmelCase , 'weight' ):
continue
if type(__lowerCAmelCase ) in ignore:
continue
if [True for s in ignore if type(__lowerCAmelCase ) is str and s in name]:
continue
SCREAMING_SNAKE_CASE : List[str] = F'''Act:{input_q.extra_repr()}'''
SCREAMING_SNAKE_CASE : Any = F'''Wgt:{weight_q.extra_repr()}'''
SCREAMING_SNAKE_CASE : Tuple = F'''{name:{name_width}} {act_str} {wgt_str}'''
if len(__lowerCAmelCase ) <= line_width:
logger.info(__lowerCAmelCase )
else:
logger.info(F'''{name:{name_width}} {act_str}''' )
logger.info(F'''{' ':{name_width}} {wgt_str}''' )
def __a ( __lowerCAmelCase ) -> Dict:
SCREAMING_SNAKE_CASE : str = 0
for name, mod in model.named_modules():
if isinstance(__lowerCAmelCase , pytorch_quantization.nn.TensorQuantizer ):
print(F'''{name:80} {mod}''' )
count += 1
print(F'''{count} TensorQuantizers found in model''' )
def __a ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if quantizer_mod is not None:
assert hasattr(__lowerCAmelCase , __lowerCAmelCase )
setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else:
logger.warning(F'''{name} has no {quantizer}''' )
def __a ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase="both" , **__lowerCAmelCase ) -> Any:
SCREAMING_SNAKE_CASE : Tuple = F'''Warning: changing {which} quantizers of {name:{qname_width}}'''
for k, v in kwargs.items():
s += F''' {k}={v}'''
if which in ["input", "both"]:
set_quantizer(__lowerCAmelCase , __lowerCAmelCase , '_input_quantizer' , __lowerCAmelCase , __lowerCAmelCase )
if which in ["weight", "both"]:
set_quantizer(__lowerCAmelCase , __lowerCAmelCase , '_weight_quantizer' , __lowerCAmelCase , __lowerCAmelCase )
logger.info(__lowerCAmelCase )
def __a ( __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) -> Optional[int]:
for name, mod in model.named_modules():
if hasattr(__lowerCAmelCase , '_input_quantizer' ) or hasattr(__lowerCAmelCase , '_weight_quantizer' ):
for n in names:
if re.search(__lowerCAmelCase , __lowerCAmelCase ):
set_quantizers(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
elif name.endswith('_quantizer' ):
for n in names:
if re.search(__lowerCAmelCase , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE : int = F'''Warning: changing {name:{name_width}}'''
for k, v in kwargs.items():
s += F''' {k}={v}'''
setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
logger.info(__lowerCAmelCase ) | 352 |
def __a ( __lowerCAmelCase , __lowerCAmelCase = 0 ) -> list:
SCREAMING_SNAKE_CASE : int = length or len(__lowerCAmelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = list_data[i + 1], list_data[i]
SCREAMING_SNAKE_CASE : Any = True
return list_data if not swapped else bubble_sort(__lowerCAmelCase , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 352 | 1 |
"""simple docstring"""
from collections.abc import Callable
class a_ :
def __init__( self : Dict , __UpperCamelCase : Callable | None = None ) ->None:
'''simple docstring'''
_UpperCAmelCase = []
# Stores indexes of each item for supporting updates and deletion.
_UpperCAmelCase = {}
# Stores current size of heap.
_UpperCAmelCase = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
_UpperCAmelCase = key or (lambda __UpperCamelCase : x)
def _snake_case ( self : Optional[int] , __UpperCamelCase : int ) ->int | None:
'''simple docstring'''
return int((i - 1) / 2 ) if i > 0 else None
def _snake_case ( self : Optional[int] , __UpperCamelCase : int ) ->int | None:
'''simple docstring'''
_UpperCAmelCase = int(2 * i + 1 )
return left if 0 < left < self.size else None
def _snake_case ( self : List[str] , __UpperCamelCase : int ) ->int | None:
'''simple docstring'''
_UpperCAmelCase = int(2 * i + 2 )
return right if 0 < right < self.size else None
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : int ) ->None:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
_UpperCAmelCase ,_UpperCAmelCase = self.arr[j], self.arr[i]
def _snake_case ( self : List[Any] , __UpperCamelCase : int , __UpperCamelCase : int ) ->bool:
'''simple docstring'''
return self.arr[i][1] < self.arr[j][1]
def _snake_case ( self : Optional[Any] , __UpperCamelCase : int ) ->int:
'''simple docstring'''
_UpperCAmelCase = self._left(lowercase_ )
_UpperCAmelCase = self._right(lowercase_ )
_UpperCAmelCase = i
if left is not None and not self._cmp(lowercase_ , lowercase_ ):
_UpperCAmelCase = left
if right is not None and not self._cmp(lowercase_ , lowercase_ ):
_UpperCAmelCase = right
return valid_parent
def _snake_case ( self : Tuple , __UpperCamelCase : int ) ->None:
'''simple docstring'''
_UpperCAmelCase = self._parent(lowercase_ )
while parent is not None and not self._cmp(lowercase_ , lowercase_ ):
self._swap(lowercase_ , lowercase_ )
_UpperCAmelCase ,_UpperCAmelCase = parent, self._parent(lowercase_ )
def _snake_case ( self : Optional[Any] , __UpperCamelCase : int ) ->None:
'''simple docstring'''
_UpperCAmelCase = self._get_valid_parent(lowercase_ )
while valid_parent != index:
self._swap(lowercase_ , lowercase_ )
_UpperCAmelCase ,_UpperCAmelCase = valid_parent, self._get_valid_parent(lowercase_ )
def _snake_case ( self : List[str] , __UpperCamelCase : int , __UpperCamelCase : int ) ->None:
'''simple docstring'''
if item not in self.pos_map:
return
_UpperCAmelCase = self.pos_map[item]
_UpperCAmelCase = [item, self.key(lowercase_ )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(lowercase_ )
self._heapify_down(lowercase_ )
def _snake_case ( self : str , __UpperCamelCase : int ) ->None:
'''simple docstring'''
if item not in self.pos_map:
return
_UpperCAmelCase = self.pos_map[item]
del self.pos_map[item]
_UpperCAmelCase = self.arr[self.size - 1]
_UpperCAmelCase = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(lowercase_ )
self._heapify_down(lowercase_ )
def _snake_case ( self : int , __UpperCamelCase : int , __UpperCamelCase : int ) ->None:
'''simple docstring'''
_UpperCAmelCase = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(lowercase_ )] )
else:
_UpperCAmelCase = [item, self.key(lowercase_ )]
_UpperCAmelCase = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def _snake_case ( self : Optional[Any] ) ->tuple | None:
'''simple docstring'''
return self.arr[0] if self.size else None
def _snake_case ( self : List[Any] ) ->tuple | None:
'''simple docstring'''
_UpperCAmelCase = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def _UpperCamelCase ( ) -> None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod() | 715 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
a : List[Any] = get_logger()
a : Optional[dict] = None
class a_ ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
def __init__( self : int , __UpperCamelCase : List[str]=None , __UpperCamelCase : Optional[int]=None , **__UpperCamelCase : int ) ->Tuple:
'''simple docstring'''
super().__init__(features=__UpperCamelCase )
import jax
from jaxlib.xla_client import Device
if isinstance(__UpperCamelCase , __UpperCamelCase ):
raise ValueError(
f"""Expected {device} to be a `str` not {type(__UpperCamelCase )}, as `jaxlib.xla_extension.Device` """
"""is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
"""the device with `str()` to get its string identifier that will be internally mapped """
"""to the actual `jaxlib.xla_extension.Device`.""" )
_UpperCAmelCase = device if isinstance(__UpperCamelCase , __UpperCamelCase ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_UpperCAmelCase = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f"""Device with string identifier {self.device} not listed among the available """
f"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
f"""device: {str(jax.devices()[0] )}.""" )
_UpperCAmelCase = str(jax.devices()[0] )
_UpperCAmelCase = jnp_array_kwargs
@staticmethod
def _snake_case ( ) ->Dict[str, "jaxlib.xla_extension.Device"]:
'''simple docstring'''
import jax
return {str(__UpperCamelCase ): device for device in jax.devices()}
def _snake_case ( self : Dict , __UpperCamelCase : Any ) ->Union[str, Any]:
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(__UpperCamelCase , __UpperCamelCase ) and column:
if all(
isinstance(__UpperCamelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__UpperCamelCase , axis=0 )
return column
def _snake_case ( self : List[str] , __UpperCamelCase : Any ) ->Optional[int]:
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(__UpperCamelCase , (str, bytes, type(__UpperCamelCase )) ):
return value
elif isinstance(__UpperCamelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
_UpperCAmelCase = {}
if isinstance(__UpperCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
_UpperCAmelCase = {"""dtype""": jnp.intaa}
else:
_UpperCAmelCase = {"""dtype""": jnp.intaa}
elif isinstance(__UpperCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
_UpperCAmelCase = {"""dtype""": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__UpperCamelCase , PIL.Image.Image ):
_UpperCAmelCase = np.asarray(__UpperCamelCase )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_UpperCAmelCase = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__UpperCamelCase , **{**default_dtype, **self.jnp_array_kwargs} )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : List[str] ) ->Any:
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__UpperCamelCase , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__UpperCamelCase , """__array__""" ) and not isinstance(__UpperCamelCase , jax.Array ):
_UpperCAmelCase = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__UpperCamelCase , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(__UpperCamelCase ) for substruct in data_struct] )
elif isinstance(__UpperCamelCase , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(__UpperCamelCase ) for substruct in data_struct] )
return self._tensorize(__UpperCamelCase )
def _snake_case ( self : List[str] , __UpperCamelCase : dict ) ->int:
'''simple docstring'''
return map_nested(self._recursive_tensorize , __UpperCamelCase , map_list=__UpperCamelCase )
def _snake_case ( self : Dict , __UpperCamelCase : pa.Table ) ->Mapping:
'''simple docstring'''
_UpperCAmelCase = self.numpy_arrow_extractor().extract_row(__UpperCamelCase )
_UpperCAmelCase = self.python_features_decoder.decode_row(__UpperCamelCase )
return self.recursive_tensorize(__UpperCamelCase )
def _snake_case ( self : Optional[int] , __UpperCamelCase : pa.Table ) ->"jax.Array":
'''simple docstring'''
_UpperCAmelCase = self.numpy_arrow_extractor().extract_column(__UpperCamelCase )
_UpperCAmelCase = self.python_features_decoder.decode_column(__UpperCamelCase , pa_table.column_names[0] )
_UpperCAmelCase = self.recursive_tensorize(__UpperCamelCase )
_UpperCAmelCase = self._consolidate(__UpperCamelCase )
return column
def _snake_case ( self : Optional[Any] , __UpperCamelCase : pa.Table ) ->Mapping:
'''simple docstring'''
_UpperCAmelCase = self.numpy_arrow_extractor().extract_batch(__UpperCamelCase )
_UpperCAmelCase = self.python_features_decoder.decode_batch(__UpperCamelCase )
_UpperCAmelCase = self.recursive_tensorize(__UpperCamelCase )
for column_name in batch:
_UpperCAmelCase = self._consolidate(batch[column_name] )
return batch | 19 | 0 |
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
lowercase_: Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def _lowercase ( UpperCAmelCase_):
"""simple docstring"""
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(UpperCAmelCase_):
return ext
raise Exception(
F'Unable to determine file format from file extension {path}. '
F'Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}')
def _lowercase ( UpperCAmelCase_):
"""simple docstring"""
snake_case__ : Tuple = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
snake_case__ : Tuple = try_infer_format_from_ext(args.input) if args.format == """infer""" else args.format
snake_case__ : Any = PipelineDataFormat.from_str(
format=UpperCAmelCase_ , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(UpperCAmelCase_ , UpperCAmelCase_)
class lowercase__ (__snake_case ):
"""simple docstring"""
def __init__( self : Any , __a : Pipeline , __a : PipelineDataFormat ):
snake_case__ : Optional[int] = nlp
snake_case__ : List[str] = reader
@staticmethod
def lowercase ( __a : ArgumentParser ):
snake_case__ : Any = parser.add_parser("""run""" , help="""Run a pipeline through the CLI""" )
run_parser.add_argument("""--task""" , choices=get_supported_tasks() , help="""Task to run""" )
run_parser.add_argument("""--input""" , type=__a , help="""Path to the file to use for inference""" )
run_parser.add_argument("""--output""" , type=__a , help="""Path to the file that will be used post to write results.""" )
run_parser.add_argument("""--model""" , type=__a , help="""Name or path to the model to instantiate.""" )
run_parser.add_argument("""--config""" , type=__a , help="""Name or path to the model's config to instantiate.""" )
run_parser.add_argument(
"""--tokenizer""" , type=__a , help="""Name of the tokenizer to use. (default: same as the model name)""" )
run_parser.add_argument(
"""--column""" , type=__a , help="""Name of the column to use as input. (For multi columns input as QA use column1,columns2)""" , )
run_parser.add_argument(
"""--format""" , type=__a , default="""infer""" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="""Input format to read from""" , )
run_parser.add_argument(
"""--device""" , type=__a , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
run_parser.add_argument("""--overwrite""" , action="""store_true""" , help="""Allow overwriting the output file.""" )
run_parser.set_defaults(func=__a )
def lowercase ( self : List[Any] ):
snake_case__ , snake_case__ : Optional[Any] = self._nlp, []
for entry in self._reader:
snake_case__ : List[Any] = nlp(**__a ) if self._reader.is_multi_columns else nlp(__a )
if isinstance(__a , __a ):
outputs.append(__a )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
snake_case__ : str = self._reader.save_binary(__a )
logger.warning(f'Current pipeline requires output to be in binary format, saving at {binary_path}' )
else:
self._reader.save(__a )
| 648 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
lowercase_: Tuple = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def _lowercase ( UpperCAmelCase_):
"""simple docstring"""
snake_case__ : Any = ["""layers""", """blocks"""]
for k in ignore_keys:
state_dict.pop(UpperCAmelCase_ , UpperCAmelCase_)
lowercase_: Dict = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def _lowercase ( UpperCAmelCase_):
"""simple docstring"""
snake_case__ : Tuple = list(s_dict.keys())
for key in keys:
snake_case__ : str = key
for k, v in WHISPER_MAPPING.items():
if k in key:
snake_case__ : Union[str, Any] = new_key.replace(UpperCAmelCase_ , UpperCAmelCase_)
print(F'{key} -> {new_key}')
snake_case__ : Dict = s_dict.pop(UpperCAmelCase_)
return s_dict
def _lowercase ( UpperCAmelCase_):
"""simple docstring"""
snake_case__ , snake_case__ : Any = emb.weight.shape
snake_case__ : List[Any] = nn.Linear(UpperCAmelCase_ , UpperCAmelCase_ , bias=UpperCAmelCase_)
snake_case__ : int = emb.weight.data
return lin_layer
def _lowercase ( UpperCAmelCase_ , UpperCAmelCase_):
"""simple docstring"""
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_)
snake_case__ : Dict = os.path.basename(UpperCAmelCase_)
snake_case__ : Tuple = url.split("""/""")[-2]
snake_case__ : Optional[int] = os.path.join(UpperCAmelCase_ , UpperCAmelCase_)
if os.path.exists(UpperCAmelCase_) and not os.path.isfile(UpperCAmelCase_):
raise RuntimeError(F'{download_target} exists and is not a regular file')
if os.path.isfile(UpperCAmelCase_):
snake_case__ : Optional[int] = open(UpperCAmelCase_ , """rb""").read()
if hashlib.shaaaa(UpperCAmelCase_).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(F'{download_target} exists, but the SHA256 checksum does not match; re-downloading the file')
with urllib.request.urlopen(UpperCAmelCase_) as source, open(UpperCAmelCase_ , """wb""") as output:
with tqdm(
total=int(source.info().get("""Content-Length""")) , ncols=80 , unit="""iB""" , unit_scale=UpperCAmelCase_ , unit_divisor=1_024) as loop:
while True:
snake_case__ : Union[str, Any] = source.read(8_192)
if not buffer:
break
output.write(UpperCAmelCase_)
loop.update(len(UpperCAmelCase_))
snake_case__ : Optional[int] = open(UpperCAmelCase_ , """rb""").read()
if hashlib.shaaaa(UpperCAmelCase_).hexdigest() != expected_shaaaa:
raise RuntimeError(
"""Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.""")
return model_bytes
def _lowercase ( UpperCAmelCase_ , UpperCAmelCase_):
"""simple docstring"""
if ".pt" not in checkpoint_path:
snake_case__ : List[Any] = _download(_MODELS[checkpoint_path])
else:
snake_case__ : Union[str, Any] = torch.load(UpperCAmelCase_ , map_location="""cpu""")
snake_case__ : Union[str, Any] = original_checkpoint["""dims"""]
snake_case__ : Optional[int] = original_checkpoint["""model_state_dict"""]
snake_case__ : int = state_dict["""decoder.token_embedding.weight"""]
remove_ignore_keys_(UpperCAmelCase_)
rename_keys(UpperCAmelCase_)
snake_case__ : List[Any] = True
snake_case__ : Dict = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]
snake_case__ : List[Any] = WhisperConfig(
vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=UpperCAmelCase_ , decoder_ffn_dim=UpperCAmelCase_ , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_state"""] , max_source_positions=dimensions["""n_audio_ctx"""] , )
snake_case__ : int = WhisperForConditionalGeneration(UpperCAmelCase_)
snake_case__ , snake_case__ : Tuple = model.model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_)
if len(UpperCAmelCase_) > 0 and not set(UpperCAmelCase_) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"""Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
F' but all the following weights are missing {missing}')
if tie_embeds:
snake_case__ : Dict = make_linear_from_emb(model.model.decoder.embed_tokens)
else:
snake_case__ : Optional[int] = proj_out_weights
model.save_pretrained(UpperCAmelCase_)
if __name__ == "__main__":
lowercase_: int = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Patht to the downloaded checkpoints')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
lowercase_: int = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 648 | 1 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class lowercase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionControlNetImgaImgPipeline
__SCREAMING_SNAKE_CASE : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
__SCREAMING_SNAKE_CASE : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__SCREAMING_SNAKE_CASE : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} )
__SCREAMING_SNAKE_CASE : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS
def a ( self ):
torch.manual_seed(0 )
snake_case_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
torch.manual_seed(0 )
snake_case_ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
snake_case_ = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=snake_case , set_alpha_to_one=snake_case , )
torch.manual_seed(0 )
snake_case_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
snake_case_ = CLIPTextModel(snake_case )
snake_case_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
snake_case_ = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def a ( self , snake_case , snake_case=0 ):
if str(snake_case ).startswith('mps' ):
snake_case_ = torch.manual_seed(snake_case )
else:
snake_case_ = torch.Generator(device=snake_case ).manual_seed(snake_case )
snake_case_ = 2
snake_case_ = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=snake_case , device=torch.device(snake_case ) , )
snake_case_ = floats_tensor(control_image.shape , rng=random.Random(snake_case ) ).to(snake_case )
snake_case_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case_ = Image.fromarray(np.uinta(snake_case ) ).convert('RGB' ).resize((64, 64) )
snake_case_ = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def a ( self ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def a ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def a ( self ):
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class lowercase ( lowercase_ , lowercase_ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionControlNetImgaImgPipeline
__SCREAMING_SNAKE_CASE : str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
__SCREAMING_SNAKE_CASE : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__SCREAMING_SNAKE_CASE : Optional[Any] = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def a ( self ):
torch.manual_seed(0 )
snake_case_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
torch.manual_seed(0 )
def init_weights(snake_case ):
if isinstance(snake_case , torch.nn.Convad ):
torch.nn.init.normal(m.weight )
m.bias.data.fill_(1.0 )
snake_case_ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(snake_case )
torch.manual_seed(0 )
snake_case_ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(snake_case )
torch.manual_seed(0 )
snake_case_ = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=snake_case , set_alpha_to_one=snake_case , )
torch.manual_seed(0 )
snake_case_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
snake_case_ = CLIPTextModel(snake_case )
snake_case_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
snake_case_ = MultiControlNetModel([controlneta, controlneta] )
snake_case_ = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def a ( self , snake_case , snake_case=0 ):
if str(snake_case ).startswith('mps' ):
snake_case_ = torch.manual_seed(snake_case )
else:
snake_case_ = torch.Generator(device=snake_case ).manual_seed(snake_case )
snake_case_ = 2
snake_case_ = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=snake_case , device=torch.device(snake_case ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=snake_case , device=torch.device(snake_case ) , ),
]
snake_case_ = floats_tensor(control_image[0].shape , rng=random.Random(snake_case ) ).to(snake_case )
snake_case_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case_ = Image.fromarray(np.uinta(snake_case ) ).convert('RGB' ).resize((64, 64) )
snake_case_ = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def a ( self ):
snake_case_ = self.get_dummy_components()
snake_case_ = self.pipeline_class(**snake_case )
pipe.to(snake_case )
snake_case_ = 10.0
snake_case_ = 4
snake_case_ = self.get_dummy_inputs(snake_case )
snake_case_ = steps
snake_case_ = scale
snake_case_ = pipe(**snake_case )[0]
snake_case_ = self.get_dummy_inputs(snake_case )
snake_case_ = steps
snake_case_ = scale
snake_case_ = pipe(**snake_case , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
snake_case_ = self.get_dummy_inputs(snake_case )
snake_case_ = steps
snake_case_ = scale
snake_case_ = pipe(**snake_case , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
snake_case_ = self.get_dummy_inputs(snake_case )
snake_case_ = steps
snake_case_ = scale
snake_case_ = pipe(**snake_case , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
def a ( self ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def a ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def a ( self ):
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
def a ( self ):
snake_case_ = self.get_dummy_components()
snake_case_ = self.pipeline_class(**snake_case )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(snake_case )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def a ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self ):
snake_case_ = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' )
snake_case_ = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , safety_checker=snake_case , controlnet=snake_case )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=snake_case )
snake_case_ = torch.Generator(device='cpu' ).manual_seed(0 )
snake_case_ = 'evil space-punk bird'
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((512, 512) )
snake_case_ = load_image(
'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((512, 512) )
snake_case_ = pipe(
snake_case , snake_case , control_image=snake_case , generator=snake_case , output_type='np' , num_inference_steps=50 , strength=0.6 , )
snake_case_ = output.images[0]
assert image.shape == (512, 512, 3)
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' )
assert np.abs(expected_image - image ).max() < 9e-2
| 702 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , snake_case , snake_case=7 , snake_case=3 , snake_case=30 , snake_case=400 , snake_case=True , snake_case=None , snake_case=True , snake_case=1 / 255 , snake_case=True , snake_case=[0.5, 0.5, 0.5] , snake_case=[0.5, 0.5, 0.5] , snake_case=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
snake_case_ = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = num_channels
snake_case_ = min_resolution
snake_case_ = max_resolution
snake_case_ = do_resize
snake_case_ = size
snake_case_ = do_rescale
snake_case_ = rescale_factor
snake_case_ = do_normalize
snake_case_ = image_mean
snake_case_ = image_std
snake_case_ = do_pad
def a ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def a ( self , snake_case , snake_case=False ):
if not batched:
snake_case_ = image_inputs[0]
if isinstance(snake_case , Image.Image ):
snake_case_ , snake_case_ = image.size
else:
snake_case_ , snake_case_ = image.shape[1], image.shape[2]
if w < h:
snake_case_ = int(self.size['shortest_edge'] * h / w )
snake_case_ = self.size['shortest_edge']
elif w > h:
snake_case_ = self.size['shortest_edge']
snake_case_ = int(self.size['shortest_edge'] * w / h )
else:
snake_case_ = self.size['shortest_edge']
snake_case_ = self.size['shortest_edge']
else:
snake_case_ = []
for image in image_inputs:
snake_case_ , snake_case_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case_ = max(snake_case , key=lambda snake_case : item[0] )[0]
snake_case_ = max(snake_case , key=lambda snake_case : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase ( lowercase_ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : str = DetrImageProcessor if is_vision_available() else None
def a ( self ):
snake_case_ = DetrImageProcessingTester(self )
@property
def a ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def a ( self ):
snake_case_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case , 'image_mean' ) )
self.assertTrue(hasattr(snake_case , 'image_std' ) )
self.assertTrue(hasattr(snake_case , 'do_normalize' ) )
self.assertTrue(hasattr(snake_case , 'do_rescale' ) )
self.assertTrue(hasattr(snake_case , 'rescale_factor' ) )
self.assertTrue(hasattr(snake_case , 'do_resize' ) )
self.assertTrue(hasattr(snake_case , 'size' ) )
self.assertTrue(hasattr(snake_case , 'do_pad' ) )
def a ( self ):
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , snake_case )
snake_case_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=snake_case )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , snake_case )
def a ( self ):
pass
def a ( self ):
# Initialize image_processing
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , Image.Image )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(snake_case , batched=snake_case )
snake_case_ = image_processing(snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a ( self ):
# Initialize image_processing
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , numpify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , np.ndarray )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ = image_processing(snake_case , return_tensors='pt' ).pixel_values
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(snake_case , batched=snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a ( self ):
# Initialize image_processing
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , torchify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , torch.Tensor )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ = image_processing(snake_case , return_tensors='pt' ).pixel_values
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(snake_case , batched=snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def a ( self ):
# prepare image and target
snake_case_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
snake_case_ = json.loads(f.read() )
snake_case_ = {'image_id': 3_9769, 'annotations': target}
# encode them
snake_case_ = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
snake_case_ = image_processing(images=snake_case , annotations=snake_case , return_tensors='pt' )
# verify pixel values
snake_case_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , snake_case )
snake_case_ = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , snake_case , atol=1e-4 ) )
# verify area
snake_case_ = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , snake_case ) )
# verify boxes
snake_case_ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , snake_case )
snake_case_ = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , snake_case , atol=1e-3 ) )
# verify image_id
snake_case_ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , snake_case ) )
# verify is_crowd
snake_case_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , snake_case ) )
# verify class_labels
snake_case_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , snake_case ) )
# verify orig_size
snake_case_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , snake_case ) )
# verify size
snake_case_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , snake_case ) )
@slow
def a ( self ):
# prepare image, target and masks_path
snake_case_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
snake_case_ = json.loads(f.read() )
snake_case_ = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
snake_case_ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
snake_case_ = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
snake_case_ = image_processing(images=snake_case , annotations=snake_case , masks_path=snake_case , return_tensors='pt' )
# verify pixel values
snake_case_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , snake_case )
snake_case_ = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , snake_case , atol=1e-4 ) )
# verify area
snake_case_ = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , snake_case ) )
# verify boxes
snake_case_ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , snake_case )
snake_case_ = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , snake_case , atol=1e-3 ) )
# verify image_id
snake_case_ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , snake_case ) )
# verify is_crowd
snake_case_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , snake_case ) )
# verify class_labels
snake_case_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , snake_case ) )
# verify masks
snake_case_ = 82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , snake_case )
# verify orig_size
snake_case_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , snake_case ) )
# verify size
snake_case_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , snake_case ) )
| 108 | 0 |
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def lowercase__ ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any ) -> Tuple:
'''simple docstring'''
a__ : str = checkpoint
a__ : List[Any] = {}
a__ : List[str] = vae_state_dict["encoder.conv_in.weight"]
a__ : List[str] = vae_state_dict["encoder.conv_in.bias"]
a__ : Union[str, Any] = vae_state_dict["encoder.conv_out.weight"]
a__ : str = vae_state_dict["encoder.conv_out.bias"]
a__ : Dict = vae_state_dict["encoder.norm_out.weight"]
a__ : Optional[int] = vae_state_dict["encoder.norm_out.bias"]
a__ : Union[str, Any] = vae_state_dict["decoder.conv_in.weight"]
a__ : Optional[int] = vae_state_dict["decoder.conv_in.bias"]
a__ : Any = vae_state_dict["decoder.conv_out.weight"]
a__ : Optional[Any] = vae_state_dict["decoder.conv_out.bias"]
a__ : Dict = vae_state_dict["decoder.norm_out.weight"]
a__ : int = vae_state_dict["decoder.norm_out.bias"]
a__ : Optional[Any] = vae_state_dict["quant_conv.weight"]
a__ : List[Any] = vae_state_dict["quant_conv.bias"]
a__ : Optional[Any] = vae_state_dict["post_quant_conv.weight"]
a__ : Any = vae_state_dict["post_quant_conv.bias"]
# Retrieves the keys for the encoder down blocks only
a__ : int = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
a__ : Tuple = {
layer_id: [key for key in vae_state_dict if F"down.{layer_id}" in key] for layer_id in range(lowerCAmelCase__ )
}
# Retrieves the keys for the decoder up blocks only
a__ : Dict = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
a__ : Any = {
layer_id: [key for key in vae_state_dict if F"up.{layer_id}" in key] for layer_id in range(lowerCAmelCase__ )
}
for i in range(lowerCAmelCase__ ):
a__ : Optional[int] = [key for key in down_blocks[i] if F"down.{i}" in key and F"down.{i}.downsample" not in key]
if F"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
a__ : Optional[Any] = vae_state_dict.pop(
F"encoder.down.{i}.downsample.conv.weight" )
a__ : str = vae_state_dict.pop(
F"encoder.down.{i}.downsample.conv.bias" )
a__ : Optional[Any] = renew_vae_resnet_paths(lowerCAmelCase__ )
a__ : Union[str, Any] = {"old": F"down.{i}.block", "new": F"down_blocks.{i}.resnets"}
assign_to_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , additional_replacements=[meta_path] , config=lowerCAmelCase__ )
a__ : List[str] = [key for key in vae_state_dict if "encoder.mid.block" in key]
a__ : Any = 2
for i in range(1 , num_mid_res_blocks + 1 ):
a__ : Union[str, Any] = [key for key in mid_resnets if F"encoder.mid.block_{i}" in key]
a__ : str = renew_vae_resnet_paths(lowerCAmelCase__ )
a__ : List[str] = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , additional_replacements=[meta_path] , config=lowerCAmelCase__ )
a__ : Union[str, Any] = [key for key in vae_state_dict if "encoder.mid.attn" in key]
a__ : str = renew_vae_attention_paths(lowerCAmelCase__ )
a__ : Optional[int] = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , additional_replacements=[meta_path] , config=lowerCAmelCase__ )
conv_attn_to_linear(lowerCAmelCase__ )
for i in range(lowerCAmelCase__ ):
a__ : List[Any] = num_up_blocks - 1 - i
a__ : str = [
key for key in up_blocks[block_id] if F"up.{block_id}" in key and F"up.{block_id}.upsample" not in key
]
if F"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
a__ : Dict = vae_state_dict[
F"decoder.up.{block_id}.upsample.conv.weight"
]
a__ : Any = vae_state_dict[
F"decoder.up.{block_id}.upsample.conv.bias"
]
a__ : Optional[int] = renew_vae_resnet_paths(lowerCAmelCase__ )
a__ : List[Any] = {"old": F"up.{block_id}.block", "new": F"up_blocks.{i}.resnets"}
assign_to_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , additional_replacements=[meta_path] , config=lowerCAmelCase__ )
a__ : str = [key for key in vae_state_dict if "decoder.mid.block" in key]
a__ : int = 2
for i in range(1 , num_mid_res_blocks + 1 ):
a__ : List[str] = [key for key in mid_resnets if F"decoder.mid.block_{i}" in key]
a__ : Optional[int] = renew_vae_resnet_paths(lowerCAmelCase__ )
a__ : Any = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , additional_replacements=[meta_path] , config=lowerCAmelCase__ )
a__ : List[Any] = [key for key in vae_state_dict if "decoder.mid.attn" in key]
a__ : Optional[int] = renew_vae_attention_paths(lowerCAmelCase__ )
a__ : Optional[int] = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , additional_replacements=[meta_path] , config=lowerCAmelCase__ )
conv_attn_to_linear(lowerCAmelCase__ )
return new_checkpoint
def lowercase__ ( lowerCAmelCase__ : str , lowerCAmelCase__ : str , ) -> Tuple:
'''simple docstring'''
# Only support V1
a__ : Optional[Any] = requests.get(
" https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
a__ : Dict = io.BytesIO(r.content )
a__ : Union[str, Any] = OmegaConf.load(lowerCAmelCase__ )
a__ : List[Any] = 5_1_2
a__ : List[Any] = "cuda" if torch.cuda.is_available() else "cpu"
if checkpoint_path.endswith("safetensors" ):
from safetensors import safe_open
a__ : List[str] = {}
with safe_open(lowerCAmelCase__ , framework="pt" , device="cpu" ) as f:
for key in f.keys():
a__ : Union[str, Any] = f.get_tensor(lowerCAmelCase__ )
else:
a__ : Dict = torch.load(lowerCAmelCase__ , map_location=lowerCAmelCase__ )["state_dict"]
# Convert the VAE model.
a__ : Dict = create_vae_diffusers_config(lowerCAmelCase__ , image_size=lowerCAmelCase__ )
a__ : str = custom_convert_ldm_vae_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Union[str, Any] = AutoencoderKL(**lowerCAmelCase__ )
vae.load_state_dict(lowerCAmelCase__ )
vae.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
__UpperCAmelCase = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path) | 642 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
__UpperCAmelCase = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
__UpperCAmelCase = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''')
def lowercase__ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[int]=False ) -> List[str]:
'''simple docstring'''
a__ , a__ : Optional[int] = create_model(
"HTSAT-tiny" , "roberta" , lowerCAmelCase__ , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=lowerCAmelCase__ , fusion_type="aff_2d" if enable_fusion else None , )
return model, model_cfg
def lowercase__ ( lowerCAmelCase__ : Optional[int] ) -> Dict:
'''simple docstring'''
a__ : Any = {}
a__ : Tuple = R".*sequential.(\d+).*"
a__ : Tuple = R".*_projection.(\d+).*"
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
a__ : str = key.replace(lowerCAmelCase__ , lowerCAmelCase__ )
if re.match(lowerCAmelCase__ , lowerCAmelCase__ ):
# replace sequential layers with list
a__ : Optional[Any] = re.match(lowerCAmelCase__ , lowerCAmelCase__ ).group(1 )
a__ : int = key.replace(F"sequential.{sequential_layer}." , F"layers.{int(lowerCAmelCase__ )//3}.linear." )
elif re.match(lowerCAmelCase__ , lowerCAmelCase__ ):
a__ : Tuple = int(re.match(lowerCAmelCase__ , lowerCAmelCase__ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
a__ : List[Any] = 1 if projecton_layer == 0 else 2
a__ : Dict = key.replace(F"_projection.{projecton_layer}." , F"_projection.linear{transformers_projection_layer}." )
if "audio" and "qkv" in key:
# split qkv into query key and value
a__ : List[Any] = value
a__ : List[Any] = mixed_qkv.size(0 ) // 3
a__ : Optional[int] = mixed_qkv[:qkv_dim]
a__ : List[str] = mixed_qkv[qkv_dim : qkv_dim * 2]
a__ : Optional[Any] = mixed_qkv[qkv_dim * 2 :]
a__ : Tuple = query_layer
a__ : int = key_layer
a__ : Optional[int] = value_layer
else:
a__ : List[str] = value
return model_state_dict
def lowercase__ ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Tuple=False ) -> Tuple:
'''simple docstring'''
a__ , a__ : Tuple = init_clap(lowerCAmelCase__ , enable_fusion=lowerCAmelCase__ )
clap_model.eval()
a__ : Optional[int] = clap_model.state_dict()
a__ : Optional[Any] = rename_state_dict(lowerCAmelCase__ )
a__ : Union[str, Any] = ClapConfig()
a__ : Dict = enable_fusion
a__ : Any = ClapModel(lowerCAmelCase__ )
# ignore the spectrogram embedding layer
model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
transformers_config.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
__UpperCAmelCase = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion) | 642 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCAmelCase ( __snake_case ):
a: Optional[int] = "naver-clova-ix/donut-base-finetuned-docvqa"
a: Optional[int] = (
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
a: Dict = "document_qa"
a: Dict = AutoProcessor
a: Optional[int] = VisionEncoderDecoderModel
a: str = ["image", "text"]
a: Any = ["text"]
def __init__( self: Optional[int] , *__UpperCamelCase: str , **__UpperCamelCase: Union[str, Any] ):
if not is_vision_available():
raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' )
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
def _A ( self: Dict , __UpperCamelCase: "Image" , __UpperCamelCase: str ):
_a = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
_a = task_prompt.replace('''{user_input}''' , __UpperCamelCase )
_a = self.pre_processor.tokenizer(
__UpperCamelCase , add_special_tokens=__UpperCamelCase , return_tensors='''pt''' ).input_ids
_a = self.pre_processor(__UpperCamelCase , return_tensors='''pt''' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def _A ( self: Union[str, Any] , __UpperCamelCase: Optional[int] ):
return self.model.generate(
inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__UpperCamelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__UpperCamelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__UpperCamelCase , ).sequences
def _A ( self: List[Any] , __UpperCamelCase: List[str] ):
_a = self.pre_processor.batch_decode(__UpperCamelCase )[0]
_a = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' )
_a = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' )
_a = re.sub(R'''<.*?>''' , '''''' , __UpperCamelCase , count=1 ).strip() # remove first task start token
_a = self.pre_processor.tokenajson(__UpperCamelCase )
return sequence["answer"]
| 715 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
lowerCamelCase :Tuple = Lock()
def __snake_case ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Tuple:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(_UpperCamelCase )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
_a = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
_a = min(_UpperCamelCase , _UpperCamelCase )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(_UpperCamelCase )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
_a = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
_a = max(_UpperCamelCase , _UpperCamelCase )
# after all swaps are performed, send the values back to main
result_pipe[1].send(_UpperCamelCase )
def __snake_case ( _UpperCamelCase ) -> str:
_a = []
_a = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
_a = Pipe()
_a = Pipe()
process_array_.append(
Process(
target=_UpperCamelCase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
_a = temp_rs
_a = temp_rr
for i in range(1 , len(_UpperCamelCase ) - 1 ):
_a = Pipe()
_a = Pipe()
process_array_.append(
Process(
target=_UpperCamelCase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
_a = temp_rs
_a = temp_rr
process_array_.append(
Process(
target=_UpperCamelCase , args=(
len(_UpperCamelCase ) - 1,
arr[len(_UpperCamelCase ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(_UpperCamelCase ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(_UpperCamelCase ) ):
_a = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def __snake_case ( ) -> int:
_a = list(range(10 , 0 , -1 ) )
print('''Initial List''' )
print(*_UpperCamelCase )
_a = odd_even_transposition(_UpperCamelCase )
print('''Sorted List\n''' )
print(*_UpperCamelCase )
if __name__ == "__main__":
main()
| 346 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.