| code | code_codestyle | style_context | style_context_codestyle | label |
| --- | --- | --- | --- | --- |
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
from __future__ import annotations


def ceil_index(v, l, r, key):  # noqa: E741
    """Return the smallest index in v[l..r] whose value is >= key (binary search)."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    """
    Length of the longest strictly increasing subsequence of v, in O(n log n).

    >>> longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])
    6
    >>> longest_increasing_subsequence_length([])
    0
    """
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value becomes the tail of length-1 subsequences
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling of v[i] in tail
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
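
# Quick sanity check for the routine above (a sketch): `tail` keeps, for each
# length L, the smallest possible tail of an increasing subsequence of length L.
assert longest_increasing_subsequence_length([3, 10, 2, 1, 20]) == 3  # e.g. 3, 10, 20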
| 97 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric; keeps only the single best checkpoint."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by"
            " adding to this function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
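
# A minimal wiring sketch for the callbacks above (illustrative only; a real
# run needs a LightningModule -- here a temp dir stands in for output_dir):
import tempfile

demo_output_dir = tempfile.mkdtemp()
demo_callbacks = [
    Seq2SeqLoggingCallback(),
    get_checkpoint_callback(demo_output_dir, metric="rouge2"),
    get_early_stopping_callback(metric="rouge2", patience=3),
]
# These would then be passed to pl.Trainer(callbacks=demo_callbacks, ...).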
| 97 | 1 |
def is_even(number: int) -> bool:
    """Return True if `number` is even (its lowest bit is 0)."""
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
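
# Why the bitwise test works (a short sketch): `n & 1` isolates the lowest bit,
# which is 0 exactly for even integers -- including negatives in Python:
assert is_even(-4) and not is_even(-3)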
| 720 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images (channels-last) from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 9 | 0 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]


def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from a T5X-Flax structure into a PyTorch structure."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
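
# Example invocation of the converter above (a sketch -- the script file name
# and all paths are placeholders, not real files):
#
#     python convert_t5x_checkpoint_to_pytorch.py \
#         --t5x_checkpoint_path /path/to/t5x/checkpoint \
#         --config_file /path/to/config.json \
#         --pytorch_dump_path /path/to/pytorch_dump \
#         --scalable_attention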
| 216 |

import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger


logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    """Verification level: run all checks, only the cheap ones, or none."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksum verification of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during splits verification."""


class UnexpectedSplits(SplitsVerificationException):
    """Some recorded splits were not expected."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some splits were supposed to be recorded but were not."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size):
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
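
# Sketch of the record produced by get_size_checksum_dict (the temp file is an
# illustration, not part of the module, which only runs inside `datasets`):
#
#     import tempfile
#     with tempfile.NamedTemporaryFile(delete=False) as f:
#         f.write(b"hello")
#     get_size_checksum_dict(f.name)
#     # -> {'num_bytes': 5,
#     #     'checksum': '2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824'}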
| 216 | 1 |
def binary_recursive(decimal: int) -> str:
    """
    Return the binary representation of a positive integer.

    >>> binary_recursive(1000)
    '1111101000'
    """
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """
    Validate the input string and return its binary form with a '0b' prefix.

    >>> main("0")
    '0b0'
    """
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
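
# Spot-check against Python's built-in bin() (a sketch): both agree, including
# the sign handling for negative inputs.
assert main("-37") == bin(-37)  # '-0b100101'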
| 703 |
lowerCamelCase : Dict = "Alexander Joslin"
import operator as op
from .stack import Stack
def _SCREAMING_SNAKE_CASE ( lowercase : str ):
'''simple docstring'''
lowerCamelCase_ = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
lowerCamelCase_ = Stack()
lowerCamelCase_ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(lowercase ) )
elif i in operators:
# RULE 2
operator_stack.push(lowercase )
elif i == ")":
# RULE 4
lowerCamelCase_ = operator_stack.peek()
operator_stack.pop()
lowerCamelCase_ = operand_stack.peek()
operand_stack.pop()
lowerCamelCase_ = operand_stack.peek()
operand_stack.pop()
lowerCamelCase_ = operators[opr](lowercase , lowercase )
operand_stack.push(lowercase )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCamelCase : Any = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 651 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_xmod": [
"XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XmodConfig",
"XmodOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_xmod"] = [
"XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
"XmodForCausalLM",
"XmodForMaskedLM",
"XmodForMultipleChoice",
"XmodForQuestionAnswering",
"XmodForSequenceClassification",
"XmodForTokenClassification",
"XmodModel",
"XmodPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
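
# With the lazy structure above, `import transformers` stays cheap and the
# torch-backed classes are only resolved on first attribute access. A usage
# sketch (assumes transformers is installed with torch):
#
#     from transformers import XmodConfig   # no torch import triggered
#     from transformers import XmodModel    # materializes modeling_xmod lazily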
| 298 |
import argparse
import copy
def generate_neighbours(path):
    """
    Parse the input file (one "node_a node_b distance" triple per line) into a
    dictionary mapping each node to its [neighbour, distance] pairs.
    """
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    """Greedy nearest-neighbour tour starting from the first node in the file."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []

    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    """All solutions reachable by swapping two interior nodes, each with its total distance appended."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0

            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )

    # Pass the arguments to main method
    main(parser.parse_args())
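
# Example run of the script above (a sketch; the data file is a placeholder
# whitespace-separated edge list with "node_a node_b distance" per line):
#
#     python tabu_search.py -f tabudata2.txt -i 4 -s 3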
| 298 | 1 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(
            num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 704 |
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
lowercase__ : Dict = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
lowercase__ : str = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
lowercase__ : str = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 43 | 0 |
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """
    Convert a Roman numeral to an integer.

    >>> roman_to_int("III")
    3
    >>> roman_to_int("CLIV")
    154
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """
    Convert an integer to a Roman numeral.

    >>> int_to_roman(154)
    'CLIV'
    """
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
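
# Round-trip sanity check (a sketch): int -> Roman -> int is the identity for
# numbers the table supports.
assert int_to_roman(1994) == "MCMXCIV"
assert roman_to_int("MCMXCIV") == 1994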
| 598 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 650, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
},
{
'framework': 'tensorflow',
'script': 'run_tf.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 598 | 1 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be
        # tested with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
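
# End-user sketch of TextIteratorStreamer outside the test harness (reuses the
# tiny checkpoint above; illustrative, so it is left commented out):
#
#     tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
#     lm = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
#     streamer = TextIteratorStreamer(tok)
#     inputs = tok("Hello", return_tensors="pt")
#     Thread(target=lm.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer}).start()
#     for piece in streamer:
#         print(piece, end="", flush=True)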
| 284 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Dict , _snake_case : Optional[int] , _snake_case : List[Any]=13 , _snake_case : int=32 , _snake_case : int=3 , _snake_case : Any=4 , _snake_case : Optional[int]=[10, 20, 30, 40] , _snake_case : Optional[Any]=[2, 2, 3, 2] , _snake_case : Dict=True , _snake_case : List[Any]=True , _snake_case : int=37 , _snake_case : Union[str, Any]="gelu" , _snake_case : Tuple=10 , _snake_case : Tuple=0.02 , _snake_case : List[str]=["stage2", "stage3", "stage4"] , _snake_case : Tuple=3 , _snake_case : int=None , ):
__lowercase : List[Any] = parent
__lowercase : Union[str, Any] = batch_size
__lowercase : Optional[Any] = image_size
__lowercase : Optional[Any] = num_channels
__lowercase : List[str] = num_stages
__lowercase : Union[str, Any] = hidden_sizes
__lowercase : Optional[Any] = depths
__lowercase : List[Any] = is_training
__lowercase : List[str] = use_labels
__lowercase : Tuple = intermediate_size
__lowercase : Union[str, Any] = hidden_act
__lowercase : Dict = type_sequence_label_size
__lowercase : Dict = initializer_range
__lowercase : str = out_features
__lowercase : Tuple = num_labels
__lowercase : Tuple = scope
__lowercase : Optional[Any] = num_stages
def snake_case_ ( self : Optional[int] ):
__lowercase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : Dict = None
if self.use_labels:
__lowercase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def snake_case_ ( self : List[str] ):
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def snake_case_ ( self : int ):
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_snake_case , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=_snake_case , loss_ignore_index=255 , num_labels=self.num_labels , )
def snake_case_ ( self : Tuple , _snake_case : Optional[Any] , _snake_case : List[Any] , _snake_case : List[str] ):
__lowercase : Optional[int] = UperNetForSemanticSegmentation(config=_snake_case )
model.to(_snake_case )
model.eval()
__lowercase : Optional[Any] = model(_snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def snake_case_ ( self : Optional[int] ):
__lowercase : Optional[int] = self.prepare_config_and_inputs()
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) : Optional[int] = config_and_inputs
__lowercase : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : List[str] = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
A__ : Union[str, Any] = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {}
A__ : Union[str, Any] = False
A__ : Optional[Any] = False
A__ : int = False
A__ : Optional[int] = False
A__ : Optional[Any] = False
A__ : List[str] = False
def snake_case_ ( self : Optional[Any] ):
__lowercase : str = UperNetModelTester(self )
__lowercase : List[Any] = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 )
def snake_case_ ( self : str ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case_ ( self : str ):
return
def snake_case_ ( self : str ):
__lowercase , __lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Optional[Any] = model_class(_snake_case )
__lowercase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : Dict = [*signature.parameters.keys()]
__lowercase : Dict = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _snake_case )
def snake_case_ ( self : List[str] ):
__lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_snake_case )
@unittest.skip(reason='''UperNet does not use inputs_embeds''' )
def snake_case_ ( self : List[Any] ):
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''' )
def snake_case_ ( self : List[Any] ):
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def snake_case_ ( self : str ):
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def snake_case_ ( self : int ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def snake_case_ ( self : int ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def snake_case_ ( self : List[str] ):
pass
def snake_case_ ( self : List[Any] ):
def check_hidden_states_output(_snake_case : int , _snake_case : Optional[int] , _snake_case : Dict ):
__lowercase : Dict = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
__lowercase : Optional[int] = model(**self._prepare_for_class(_snake_case , _snake_case ) )
__lowercase : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowercase : Tuple = self.model_tester.num_stages
self.assertEqual(len(_snake_case ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__lowercase , __lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : int = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase : Any = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 284 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
a : List[str] = r"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(lowercase__ )
class a ( lowercase__ ):
"""simple docstring"""
a : Tuple = 'rag'
a : Dict = True
    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset

        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
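# Added usage sketch (not part of the original module): RagConfig is a composite
# config, so it is typically built from a question-encoder config and a generator
# config. The model identifiers below are illustrative, not prescribed by this file.
#     from transformers import AutoConfig, RagConfig
#     question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#     generator_config = AutoConfig.from_pretrained("facebook/bart-large")
#     rag_config = RagConfig.from_question_encoder_generator_configs(
#         question_encoder_config, generator_config, n_docs=5
#     )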
| 63 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
lowercase : Optional[Any] = """http://www.mocksite.com/file1.txt"""
lowercase : str = """\"text\": [\"foo\", \"foo\"]"""
lowercase : Tuple = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
import requests
monkeypatch.setattr(SCREAMING_SNAKE_CASE__ , """request""" , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = URL
if issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Union[str, Any] = url
elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : str = [url]
elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : int = {"""train""": url}
lowercase : Union[str, Any] = """dummy"""
lowercase : List[Any] = """downloads"""
lowercase : Optional[Any] = tmp_path
lowercase : Dict = DownloadConfig(
cache_dir=os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , use_etag=SCREAMING_SNAKE_CASE__ , )
lowercase : Union[str, Any] = DownloadManager(dataset_name=SCREAMING_SNAKE_CASE__ , download_config=SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = dl_manager.download(SCREAMING_SNAKE_CASE__ )
lowercase : str = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : int = [downloaded_paths]
lowercase : Any = [urls]
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
assert "train" in downloaded_paths.keys()
lowercase : Tuple = downloaded_paths.values()
lowercase : List[str] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
lowercase : Optional[Any] = Path(SCREAMING_SNAKE_CASE__ )
lowercase : str = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
lowercase : Optional[Any] = downloaded_path.read_text()
assert content == CONTENT
lowercase : Tuple = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
lowercase : Dict = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[str]:
lowercase : Union[str, Any] = str(SCREAMING_SNAKE_CASE__ )
if issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : List[Any] = filename
elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = [filename]
elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = {"""train""": filename}
lowercase : int = """dummy"""
lowercase : List[Any] = xz_file.parent
lowercase : Union[str, Any] = """extracted"""
lowercase : List[str] = DownloadConfig(
cache_dir=SCREAMING_SNAKE_CASE__ , use_etag=SCREAMING_SNAKE_CASE__ , )
lowercase : List[Any] = DownloadManager(dataset_name=SCREAMING_SNAKE_CASE__ , download_config=SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = dl_manager.extract(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : int = [extracted_paths]
lowercase : Optional[Any] = [paths]
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
assert "train" in extracted_paths.keys()
lowercase : Optional[Any] = extracted_paths.values()
lowercase : Optional[int] = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
assert extracted_path == dl_manager.extracted_paths[input_path]
lowercase : List[Any] = Path(SCREAMING_SNAKE_CASE__ )
lowercase : str = extracted_path.parts
assert parts[-1] == hash_url_to_filename(SCREAMING_SNAKE_CASE__ , etag=SCREAMING_SNAKE_CASE__ )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
lowercase : Optional[int] = extracted_path.read_text()
lowercase : str = text_file.read_text()
assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
lowercase : str = request.getfixturevalue(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(SCREAMING_SNAKE_CASE__ ) , start=1 ):
_test_jsonl(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
lowercase : Optional[Any] = request.getfixturevalue(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(SCREAMING_SNAKE_CASE__ ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(SCREAMING_SNAKE_CASE__ ) , start=1 ):
_test_jsonl(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert num_tar == 1
assert num_jsonl == 2
def test_iter_files(data_dir):  # `data_dir`: fixture providing a directory with test.txt and train.txt
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 336 | 0 |
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
| 708 |
'''simple docstring'''
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )

    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))

    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)

    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
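    # Added smoke test (not part of the original script), assuming the functions above:
    # a single centered foreground pixel dilated with a cross-shaped structuring
    # element grows into a plus shape.
    tiny = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
    cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    assert dilation(tiny, cross).tolist() == [[0, 1, 0], [1, 1, 1], [0, 1, 0]]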
| 216 | 0 |
'''simple docstring'''
import re
def dna(dna: str) -> str:
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
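    # Added sanity checks (not part of the original file), assuming `dna` above:
    # each base maps to its complement (A<->T, C<->G).
    assert dna("GCTA") == "CGAT"
    assert dna("ATGC") == "TACG"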
| 466 |
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 314 | 0 |
from datetime import datetime
import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 218 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

        results.update(result)

    return results


if __name__ == "__main__":
    main()
| 218 | 1 |
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"
class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
| 170 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 170 | 1 |
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """
    Compare a library version to some requirement using a given operation.
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """
    Compare the currently installed PyTorch version to a given reference with an operation.
    """
    return compare_versions(torch_version, operation, version)
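# Added usage sketch (not part of the original module), assuming the helpers above.
if __name__ == "__main__":
    print(is_torch_version(">=", "1.0.0"))          # True on any modern torch install
    print(compare_versions("torch", ">=", "1.0.0"))  # works for any installed package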
| 450 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Preprocess `dataset_path` and remove the remote filesystem prefix (e.g. `s3://`)."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Check whether `fs` is a remote filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    """Rename the file or directory `src` in `fs` to `dst`."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """
    Clear reference to the loop and thread, otherwise HTTPFileSystem hangs in the ML training loop.
    Only required for fsspec >= 0.9.0.
    """
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
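# Added examples (not part of the original module), assuming the helpers above.
if __name__ == "__main__":
    assert extract_path_from_uri("s3://my-bucket/my-dataset") == "my-bucket/my-dataset"
    assert extract_path_from_uri("/local/path") == "/local/path"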
| 450 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
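# Added usage sketch (not part of the original module); field values are illustrative.
#     config = DecisionTransformerConfig(state_dim=11, act_dim=3, max_ep_len=1000)
#     config.num_attention_heads  # -> 1, resolved through `attribute_map` to `n_head`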
| 109 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 109 | 1 |
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
"""<""": operator.lt,
"""<=""": operator.le,
"""==""": operator.eq,
"""!=""": operator.ne,
""">=""": operator.ge,
""">""": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint) -> None:
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure"""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
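# Added usage sketch (not part of the original module), assuming the helpers above:
# a bare name checks presence only, a spec with operators checks the version range.
if __name__ == "__main__":
    require_version("packaging")               # presence-only check
    require_version("packaging>=17.0,<100.0")  # multiple constraints in one spec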
| 710 |
"""simple docstring"""
from math import isqrt
def is_prime(number: int) -> bool:
    """Determines whether the given number is prime or not."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """
    Counts the primes below `max_prime` among the differences of consecutive cubes,
    (n + 1)**3 - n**3 = 3*n*n + 3*n + 1 (the candidates 7, 19, 37, 61, ...).
    """
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
| 507 | 0 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 645 |
"""simple docstring"""
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive integer to another base (2..36) as str."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")

    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])

    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
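    # Added examples (not part of the original file), assuming `decimal_to_any` above:
    assert decimal_to_any(0, 2) == "0"
    assert decimal_to_any(255, 16) == "FF"
    assert decimal_to_any(255, 36) == "73"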
| 645 | 1 |
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
lowerCAmelCase : Optional[int] = """"""
if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        """Splits each sentence into a list of characters, inserting a delimiter between sentences."""

        def __init__(self, sentence_delimiter=" "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s):
            return list(s)

        def process_list(self, inp):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    cer_transform = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
    cer_transform = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
_CITATION = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
_DESCRIPTION = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on characters instead of words. Please refer to the docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system, with a CER of 0 being a perfect score.
"""
_KWARGS_DESCRIPTION = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcriptions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation; set to True for a more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    """Character error rate (CER), computed with jiwer over character-level transforms."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""" , id="""sequence""" ),
                    """references""": datasets.Value("""string""" , id="""sequence""" ),
                } ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
                """https://en.wikipedia.org/wiki/Word_error_rate""",
                """https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates""",
            ] , )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references , predictions , truth_transform=cer_transform , hypothesis_transform=cer_transform , )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions , references ):
            measures = jiwer.compute_measures(
                reference , prediction , truth_transform=cer_transform , hypothesis_transform=cer_transform , )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
| 708 |
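# Illustration (not part of the metric above): CER is character-level edit distance
# normalized by reference length. This minimal sketch recomputes CER for one pair
# without jiwer, using a classic dynamic-programming Levenshtein distance
# (assumes a non-empty reference string).
def char_error_rate(prediction: str, reference: str) -> float:
    m, n = len(reference), len(prediction)
    # dist[i][j] = edits to turn reference[:i] into prediction[:j]
    dist = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m + 1):
        dist[i][0] = i  # i deletions
    for j in range(n + 1):
        dist[0][j] = j  # j insertions
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            cost = 0 if reference[i - 1] == prediction[j - 1] else 1
            dist[i][j] = min(
                dist[i - 1][j] + 1,          # deletion
                dist[i][j - 1] + 1,          # insertion
                dist[i - 1][j - 1] + cost,   # substitution (or match)
            )
    return dist[m][n] / m  # (S + D + I) / N, as in _DESCRIPTION

print(char_error_rate("this is the prediction", "this is the reference"))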
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 533 | 0 |
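# Background sketch (not from the file above; names are hypothetical): _LazyModule defers
# the heavy torch import until an attribute is first touched. PEP 562's module-level
# __getattr__ gives the same effect in plain Python:
import importlib

_LAZY_ATTRS = {"AutoformerModel": ".modeling_autoformer"}  # hypothetical attribute -> submodule map

def __getattr__(name):
    if name in _LAZY_ATTRS:
        submodule = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")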
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname , "dpr_tokenizer" )
        os.makedirs(dpr_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(dpr_tokenizer_path , DPR_VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname , "bart_tokenizer" )
        os.makedirs(bart_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )

    def tearDown(self):
        shutil.rmtree(self.tmpdirname )
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
            } )
        dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
        with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname , "dataset" )
            config.index_path = os.path.join(self.tmpdirname , "index.faiss" )
            dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) )
            dataset.drop_index("embeddings" )
            dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) )
            del dataset
            retriever = RagRetriever(
                config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        else:
            retriever = RagRetriever(
                config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , dataset ) , )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
            } )
        dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )

        index_file_name = os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
        dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" )
        pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) )

        passages_file_name = os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" )
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages , open(passages_file_name , "wb" ) )

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
        retriever = RagRetriever(
            config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states , n_docs=n_docs )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(doc_dicts ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
        self.assertEqual(len(doc_dicts[0]["id"] ) , n_docs )
        self.assertEqual(doc_dicts[0]["id"][0] , "1" )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0] , "0" )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname )
                retriever = RagRetriever.from_pretrained(tmp_dirname )
        self.assertIsInstance(retriever , RagRetriever )
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        out = retriever.retrieve(hidden_states , n_docs=1 )
        self.assertTrue(out is not None )

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False )
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states , n_docs=n_docs )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(doc_dicts ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
        self.assertEqual(len(doc_dicts[0]["id"] ) , n_docs )
        self.assertEqual(doc_dicts[0]["id"][0] , "1" )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0] , "0" )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname )
            retriever = RagRetriever.from_pretrained(tmp_dirname )
        self.assertIsInstance(retriever , RagRetriever )
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        out = retriever.retrieve(hidden_states , n_docs=1 )
        self.assertTrue(out is not None )

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True )
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states , n_docs=n_docs )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(doc_dicts ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
        self.assertEqual(len(doc_dicts[0]["id"] ) , n_docs )
        self.assertEqual(doc_dicts[0]["id"][0] , "1" )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0] , "0" )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname )
            retriever = RagRetriever.from_pretrained(tmp_dirname )
        self.assertIsInstance(retriever , RagRetriever )
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        out = retriever.retrieve(hidden_states , n_docs=1 )
        self.assertTrue(out is not None )

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states , n_docs=n_docs )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(doc_dicts ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] )
        self.assertEqual(len(doc_dicts[0]["text"] ) , n_docs )
        self.assertEqual(doc_dicts[0]["text"][0] , "bar" )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0] , "foo" )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def test_legacy_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname )
            retriever = RagRetriever.from_pretrained(tmp_dirname )
        self.assertIsInstance(retriever , RagRetriever )
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        out = retriever.retrieve(hidden_states , n_docs=1 )
        self.assertTrue(out is not None )
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        out = retriever(question_input_ids , hidden_states , prefix=retriever.config.generator.prefix , n_docs=n_docs )
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(context_input_ids , list )
        self.assertIsInstance(context_attention_mask , list )
        self.assertIsInstance(retrieved_doc_embeds , np.ndarray )

        out = retriever(
            question_input_ids , hidden_states , prefix=retriever.config.generator.prefix , n_docs=n_docs , return_tensors="pt" , )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(context_input_ids , torch.Tensor )
        self.assertIsInstance(context_attention_mask , torch.Tensor )
        self.assertIsInstance(retrieved_doc_embeds , torch.Tensor )

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False )
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer )

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        out = retriever(question_input_ids , hidden_states , prefix=retriever.config.generator.prefix , n_docs=n_docs )

        self.assertEqual(
            len(out ) , 6 )  # check whether the retriever output consists of 6 attributes, including the tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , True )  # check for doc-token-related keys in the dictionary
| 672 |
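# Aside (not part of the test file): the "max inner product is reached with second doc"
# assertions follow directly from the dummy embeddings. Doc 0 is all ones and doc 1 is
# all twos, so for the all-ones query the inner products are d and 2*d, and the
# inner-product index ranks doc 1 first; the negated query flips the ordering:
import numpy as np

d = 8
docs = np.stack([np.ones(d), 2 * np.ones(d)])  # the two dummy documents
queries = np.stack([np.ones(d), -np.ones(d)])  # the two test queries
scores = queries @ docs.T                      # inner products, shape (2, 2)
print(scores.argmax(axis=1))                   # [1 0], matching doc_ids [[1], [0]]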
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    """A trivial dataset that just returns its own index."""

    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features ), "labels": torch.tensor(features )}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120 , 80 )

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0 , device=input_ids.device ), input_ids
        else:
            return input_ids


class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd , env=self.get_env() )
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd , env=self.get_env() )
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
F'distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}")
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 509 | 0 |
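# Aside (illustrative, not taken from the script above): the sequential check passes
# because evaluation shards the dataset across ranks and then concatenates the per-rank
# outputs back in rank order. A plain-Python sketch of one such contiguous-shard scheme,
# padding by wrapping around and dropping the padding after the gather:
import math as _math

world_size, n = 2, 7
per_rank = _math.ceil(n / world_size)
indices = list(range(n)) + list(range(n))[: per_rank * world_size - n]  # pad by wrapping
shards = [indices[r * per_rank : (r + 1) * per_rank] for r in range(world_size)]
merged = [i for shard in shards for i in shard][:n]  # concat in rank order, drop padding
assert merged == list(range(n))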
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : Any = logging.get_logger(__name__)
lowercase_ : List[Any] = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__( self , depths = [3, 2, 6, 4] , hidden_sizes = [48, 96, 224, 448] , downsamples = [True, True, True, True] , dim = 448 , key_dim = 32 , attention_ratio = 4 , resolution = 7 , num_hidden_layers = 5 , num_attention_heads = 8 , mlp_expansion_ratio = 4 , hidden_dropout_prob = 0.0 , patch_size = 16 , num_channels = 3 , pool_size = 3 , downsample_patch_size = 3 , downsample_stride = 2 , downsample_pad = 1 , drop_path_rate = 0.0 , num_meta3d_blocks = 1 , distillation = True , use_layer_scale = True , layer_scale_init_value = 1E-5 , hidden_act = "gelu" , initializer_range = 0.02 , layer_norm_eps = 1E-12 , image_size = 224 , batch_norm_eps = 1E-05 , **kwargs , ):
        """Store the EfficientFormer hyperparameters on the instance."""
        super().__init__(**kwargs )
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 721 |
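# Usage sketch (assumes the class name EfficientFormerConfig, as reconstructed above, and
# the standard PretrainedConfig serialization API): stored hyperparameters round-trip
# through to_dict()/from_dict(), which is how configs are saved alongside checkpoints.
config = EfficientFormerConfig(hidden_sizes=[48, 96, 224, 448], num_attention_heads=8)
restored = EfficientFormerConfig.from_dict(config.to_dict())
assert restored.num_attention_heads == 8
assert restored.model_type == "efficientformer"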
"""simple docstring"""
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Return (start, end, sum) of a maximum-sum subarray of arr[low : high + 1]."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_simple_efficiency() -> None:
    input_sizes = [10, 100, 1_000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 295 | 0 |
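# Contrast (not part of the file above): the divide-and-conquer version runs in O(n log n).
# Kadane's algorithm finds the same maximum subarray *sum* in O(n) with a single pass
# (assumes a non-empty input list; it does not report the boundary indices in this form):
def kadane(arr: list[float]) -> float:
    best = current = arr[0]
    for x in arr[1:]:
        current = max(x, current + x)  # extend the current run or restart at x
        best = max(best, current)
    return best

assert kadane([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6  # subarray [4, -1, 2, 1]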
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.asm = False
        self.causal = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_sequence_label_size = 16
        self.type_vocab_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.float32 )

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size] , 2 , dtype=tf.float32 )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = FlaubertConfig(
            vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertModel(config=config )
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs )

        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertWithLMHeadModel(config )
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForQuestionAnsweringSimple(config )
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForSequenceClassification(config )
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def create_and_check_flaubert_for_token_classification(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_flaubert_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast" )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FlaubertConfig , emb_dim=37 )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs )

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs )

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs )

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs )

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs )

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" )

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.int32 , )  # "J'aime flaubert !"

        output = model(input_ids )[0]
        expected_shape = tf.TensorShape((1, 8, 512) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ] , dtype=tf.float32 , )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 620 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    """Wraps a SAM image processor and normalizes point/box prompts to the resized image."""

    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor )
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]
    def __call__(self, images=None, input_points=None, input_labels=None, input_boxes=None, return_tensors=None, **kwargs,) -> BatchEncoding:
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, **kwargs,)
        # pop arguments that are not used in the forward pass but are needed for normalization
        original_sizes = encoding_image_processor["original_sizes"]

        if hasattr(original_sizes, "numpy" ):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()

        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points, input_labels=input_labels, input_boxes=input_boxes,)

        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor, original_sizes, input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, return_tensors=return_tensors,)

        return encoding_image_processor
    def _normalize_and_convert(self, encoding_image_processor, original_sizes, input_points=None, input_labels=None, input_boxes=None, return_tensors="pt",):
        if input_points is not None:
            if len(original_sizes ) != len(input_points ):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0] ) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size )
                    for point, original_size in zip(input_points, original_sizes )
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points ):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels )

            input_points = np.array(input_points )

        if input_labels is not None:
            input_labels = np.array(input_labels )

        if input_boxes is not None:
            if len(original_sizes ) != len(input_boxes ):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True )
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True )
                    for box, original_size in zip(input_boxes, original_sizes )
                ]
            input_boxes = np.array(input_boxes )

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes )
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes )
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1 ) if len(input_boxes.shape ) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes} )
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points )
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points )
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1 ) if len(input_points.shape ) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points} )
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels )
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels )
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1 ) if len(input_labels.shape ) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels} )

        return encoding_image_processor
    def _pad_points_and_labels(self, input_points, input_labels):
        expected_nb_points = max([point.shape[0] for point in input_points] )
        processed_input_points = []
        for i, point in enumerate(input_points ):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value], axis=0 )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value] )
            processed_input_points.append(point )
        input_points = processed_input_points
        return input_points, input_labels
    def _normalize_coordinates(self, target_size, coords, original_size, is_bounding_box=False):
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size )
        coords = deepcopy(coords ).astype(float )

        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2 )

        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            coords = coords.reshape(-1, 4 )

        return coords
    def _check_and_preprocess_points(self, input_points=None, input_labels=None, input_boxes=None,):
        if input_points is not None:
            if hasattr(input_points, "numpy" ):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()

            if not isinstance(input_points, list ) or not isinstance(input_points[0], list ):
                raise ValueError("Input points must be a list of list of floating points." )
            input_points = [np.array(input_point ) for input_point in input_points]
        else:
            input_points = None

        if input_labels is not None:
            if hasattr(input_labels, "numpy" ):
                input_labels = input_labels.numpy().tolist()

            if not isinstance(input_labels, list ) or not isinstance(input_labels[0], list ):
                raise ValueError("Input labels must be a list of list integers." )
            input_labels = [np.array(label ) for label in input_labels]
        else:
            input_labels = None

        if input_boxes is not None:
            if hasattr(input_boxes, "numpy" ):
                input_boxes = input_boxes.numpy().tolist()

            if (
                not isinstance(input_boxes, list )
                or not isinstance(input_boxes[0], list )
                or not isinstance(input_boxes[0][0], list )
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points." )
            input_boxes = [np.array(box ).astype(np.float32 ) for box in input_boxes]
        else:
            input_boxes = None

        return input_points, input_labels, input_boxes
    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names ) )

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs )
| 689 | 0 |
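# Standalone illustration of the rescaling math in _normalize_coordinates above: SAM
# scales the longest image side to `longest_edge` and multiplies point coordinates by the
# resulting width/height ratios. `longest_edge=1024` is an assumed example value here.
def rescale_point(point, original_size, longest_edge=1024):
    old_h, old_w = original_size
    scale = longest_edge / max(old_h, old_w)
    new_h, new_w = int(old_h * scale + 0.5), int(old_w * scale + 0.5)  # rounded resize shape
    x, y = point
    return (x * (new_w / old_w), y * (new_h / old_h))

print(rescale_point((256, 128), (512, 768)))  # ~ (341.3, 170.8)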
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 704 |
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to the given precision using the Chudnovsky algorithm."""
    if not isinstance(precision , int ):
        raise TypeError('Undefined for non-integers' )
    elif precision < 1:
        raise ValueError('Undefined for non-natural numbers' )

    getcontext().prec = precision
    num_iterations = ceil(precision / 14 )
    constant_term = 426_880 * Decimal(10_005 ).sqrt()
    exponential_term = 1
    linear_term = 13_591_409
    partial_sum = Decimal(linear_term )
    for k in range(1 , num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 545_140_134
        exponential_term *= -262_537_412_640_768_000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]


if __name__ == "__main__":
    n = 50
    print(F"""The first {n} digits of pi is: {pi(n)}""")
| 438 | 0 |
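# Sanity check (illustrative): the first Chudnovsky term is already accurate to ~14
# digits, so for small precisions the result should match the decimal expansion of
# math.pi; the implementation drops the last character to hide final-digit rounding.
import math

assert pi(10) == str(math.pi)[:10]  # "3.14159265"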
'''simple docstring'''
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: float, parent: Node | None, ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """Manhattan distance from the current position to the goal."""
        dx = abs(self.pos_x - self.goal_x )
        dy = abs(self.pos_y - self.goal_y )
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , None )

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0 )

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node )

            self.closed_nodes.append(current_node )
            successors = self.get_successors(current_node )

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
| 56 |
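# Usage sketch (separate from the __main__ demo above): search() returns (row, col)
# positions from start to goal; note that get_successors reads the module-level `grid`,
# so a different maze means mutating `grid` in place first.
searcher = GreedyBestFirst((0, 0), (2, 2))
route = searcher.search()
if route:
    print(route[0], "->", route[-1])  # (0, 0) -> (2, 2)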
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")
class SegmentTree(Generic[T]):
    """Iterative segment tree over `arr`, combining values with the associative function `fnc`."""

    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]):
        any_type: Any | T = None

        self.N: int = len(arr )
        self.st: list[T] = [any_type for _ in range(self.N )] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1 , 0 , -1 ):
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res , self.st[l] )
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res , self.st[r] )
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Compare every segment tree query against a brute-force reduce."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
        test_all_segments()
| 388 | 0 |
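# Usage sketch (not part of the test harness above): SegmentTree accepts any associative
# combiner, so it supports operations like gcd with no extra code.
from math import gcd

gcd_tree = SegmentTree([12, 18, 24, 36], gcd)
print(gcd_tree.query(0, 3))  # 6, gcd of the whole array
gcd_tree.update(1, 7)
print(gcd_tree.query(0, 1))  # 1, gcd(12, 7)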
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class UpperCAmelCase ( snake_case_):
"""simple docstring"""
def UpperCamelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
    def _create_example_dataset( self : Optional[Any] ) -> Optional[Any]:
        data = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
        return Dataset.from_dict(data )
    def test_create( self : Dict ) -> Optional[Any]:
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records )
        self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''] )
        for i, r in enumerate(dset ):
            self.assertDictEqual(r , example_records[i] )
    def test_list_dict_equivalent( self : List[str] ) -> Optional[Any]:
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records )
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
        self.assertEqual(dset.info , dset_from_dict.info )
    def test_uneven_records( self : str ) -> List[Any]: # checks what happens with missing columns
        uneven_records = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
        dset = Dataset.from_list(uneven_records )
        self.assertDictEqual(dset[0] , {'''col_1''': 1} )
        self.assertDictEqual(dset[1] , {'''col_1''': None} ) # NB: first record is used for columns
    def test_variable_list_records( self : Optional[Any] ) -> Tuple: # checks if the type can be inferred from the second record
        list_records = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
        dset = Dataset.from_list(list_records )
        self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''' ) ) )
    def test_create_empty( self : Dict ) -> Any:
        dset = Dataset.from_list([] )
        self.assertEqual(len(dset ) , 0 )
        self.assertListEqual(dset.column_names , [] )
| 701 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester ( unittest.TestCase):
"""simple docstring"""
    def __init__( self : Optional[int] , parent : Union[str, Any] , batch_size : Any=7 , num_channels : List[Any]=3 , min_resolution : Dict=30 , max_resolution : Union[str, Any]=400 , do_resize_and_center_crop : List[Any]=True , size : List[str]=None , crop_pct : List[str]=0.9 , crop_size : Any=None , do_normalize : Any=True , image_mean : Optional[Any]=[0.5, 0.5, 0.5] , image_std : List[str]=[0.5, 0.5, 0.5] , ) -> Dict:
        size = size if size is not None else {'''shortest_edge''': 30}
        crop_size = crop_size if crop_size is not None else {'''height''': 30, '''width''': 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self : str ) -> Union[str, Any]:
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class PoolFormerImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase):
"""simple docstring"""
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None
    def setUp( self : Optional[int] ) -> int:
        self.image_processor_tester = PoolFormerImageProcessingTester(self )
    @property
    def image_processor_dict( self : Tuple ) -> Optional[int]:
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self : Optional[Any] ) -> int:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''do_resize_and_center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
        self.assertTrue(hasattr(image_processing , '''crop_pct''' ) )
        self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing , '''image_std''' ) )
    def test_image_processor_from_dict_with_kwargs( self : Any ) -> Dict:
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 30} )
        self.assertEqual(image_processor.crop_size , {'''height''': 30, '''width''': 30} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
    def test_batch_feature( self : Dict ) -> Optional[Any]:
        pass
    def test_call_pil( self : Union[str, Any] ) -> List[Any]:
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
# Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_numpy( self : Union[str, Any] ) -> Optional[int]:
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
# Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_pytorch( self : str ) -> List[Any]:
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
# Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 271 | 0 |
'''simple docstring'''
INSTALL_CONTENT = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 56 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class SCREAMING_SNAKE_CASE ( PretrainedConfig ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """gptj"""
_SCREAMING_SNAKE_CASE = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__( self : int , vocab_size : List[Any]=5_0_4_0_0 , n_positions : int=2_0_4_8 , n_embd : Dict=4_0_9_6 , n_layer : Dict=2_8 , n_head : str=1_6 , rotary_dim : Union[str, Any]=6_4 , n_inner : int=None , activation_function : Union[str, Any]="gelu_new" , resid_pdrop : str=0.0 , embd_pdrop : str=0.0 , attn_pdrop : int=0.0 , layer_norm_epsilon : str=1E-5 , initializer_range : Tuple=0.0_2 , use_cache : Any=True , bos_token_id : Union[str, Any]=5_0_2_5_6 , eos_token_id : int=5_0_2_5_6 , tie_word_embeddings : int=False , **kwargs : Tuple , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class SCREAMING_SNAKE_CASE ( OnnxConfigWithPast ):
"""simple docstring"""
    def __init__( self : Optional[int] , config : PretrainedConfig , task : str = "default" , patching_specs : List[PatchingSpec] = None , use_past : bool = False , ):
        """simple docstring"""
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , 'pad_token_id' , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs( self : Tuple ):
        """simple docstring"""
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='inputs' )
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs
@property
    def num_layers( self : List[str] ):
        """simple docstring"""
        return self._config.n_layer
@property
    def num_attention_heads( self : str ):
        """simple docstring"""
        return self._config.n_head
    def generate_dummy_inputs( self : List[Any] , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ):
        """simple docstring"""
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch

                batch , seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                # (batch, n_head, past_seq_len, head_dim) -- the per-layer KV cache shape
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
@property
    def default_onnx_opset( self : int ):
        """simple docstring"""
        return 1_3
| 430 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''xlm-roberta-base''': 512,
'''xlm-roberta-large''': 512,
'''xlm-roberta-large-finetuned-conll02-dutch''': 512,
'''xlm-roberta-large-finetuned-conll02-spanish''': 512,
'''xlm-roberta-large-finetuned-conll03-english''': 512,
'''xlm-roberta-large-finetuned-conll03-german''': 512,
}
class A ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs = None, **kwargs, ):
        """simple docstring"""
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False ) if isinstance(mask_token, str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids['''<mask>'''] = len(self.sp_model ) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
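        # Illustrative mapping (an example, not from the original file): if spm
        # assigns "," the id 3, its fairseq id becomes 3 + self.fairseq_offset = 4,
        # while the four specials above resolve through fairseq_tokens_to_ids.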
    def __getstate__( self ):
        """simple docstring"""
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        state['''sp_model_proto'''] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__( self, d ):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens( self, token_ids_0, token_ids_1 = None ):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask( self, token_ids_0, token_ids_1 = None, already_has_special_tokens = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self, token_ids_0, token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
@property
    def vocab_size( self ):
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
    def get_vocab( self ):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def _tokenize( self, text ):
        """simple docstring"""
        return self.sp_model.encode(text, out_type=str )

    def _convert_token_to_id( self, token ):
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token( self, index ):
        """simple docstring"""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def convert_tokens_to_string( self, tokens ):
        """simple docstring"""
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE, ''' ''' ).strip()
        return out_string
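        # Example: ["▁Hello", "▁world"] -> "▁Hello▁world" -> " Hello world" -> "Hello world"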
    def save_vocabulary( self, save_directory, filename_prefix = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file, out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file, '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 325 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class A ( unittest.TestCase ):
    def setUp( self ):
        """simple docstring"""
        self.checkpoint = '''ZinengTang/tvlt-base'''
        self.tmpdirname = tempfile.mkdtemp()
    def get_image_processor( self, **kwargs ):
        """simple docstring"""
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs )
    def get_feature_extractor( self, **kwargs ):
        """simple docstring"""
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs )
    def tearDown( self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = TvltProcessor.from_pretrained(self.tmpdirname )
        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor )
        self.assertIsInstance(processor.image_processor, TvltImageProcessor )
    def test_feature_extractor( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor )
        audio = np.ones([1_2000] )
        audio_dict = feature_extractor(audio, return_tensors='''np''' )
        input_processor = processor(audio=audio, return_tensors='''np''' )
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1E-2 )
    def test_image_processor( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor )
        images = np.ones([3, 224, 224] )
        image_dict = image_processor(images, return_tensors='''np''' )
        input_processor = processor(images=images, return_tensors='''np''' )
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1E-2 )
    def test_processor( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor )
        audio = np.ones([1_2000] )
        images = np.ones([3, 224, 224] )
        inputs = processor(audio=audio, images=images )
        self.assertListEqual(list(inputs.keys() ), ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_model_input_names( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor )
        self.assertListEqual(
            processor.model_input_names, image_processor.model_input_names + feature_extractor.model_input_names, msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''', )
| 325 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 242 |
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path( suffix="" ):
    '''simple docstring'''
    directory = tempfile.mkdtemp()
    return os.path.join(directory , str(uuid.uuid4() ) + suffix )
@require_soundfile
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def test_from_tensor( self ):
        '''simple docstring'''
        tensor = torch.rand(1_2 , dtype=torch.float32 ) - 0.5
        agent_type = AgentAudio(tensor )
        path = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor , agent_type.to_raw() , atol=1e-4 ) )
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path ) )
        # Ensure that the file contains the same value as the original tensor
        new_tensor , _ = sf.read(path )
        self.assertTrue(torch.allclose(tensor , torch.tensor(new_tensor ) , atol=1e-4 ) )
    def test_from_string( self ):
        '''simple docstring'''
        tensor = torch.rand(1_2 , dtype=torch.float32 ) - 0.5
        path = get_new_path(suffix=".wav" )
        sf.write(path , tensor , 1_6_0_0_0 )
        agent_type = AgentAudio(path )
        self.assertTrue(torch.allclose(tensor , agent_type.to_raw() , atol=1e-4 ) )
        self.assertEqual(agent_type.to_string() , path )
@require_vision
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def test_from_tensor( self ):
        '''simple docstring'''
        tensor = torch.randint(0 , 2_5_6 , (6_4, 6_4, 3) )
        agent_type = AgentImage(tensor )
        path = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor , agent_type._tensor , atol=1e-4 ) )
        self.assertIsInstance(agent_type.to_raw() , Image.Image )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
    def test_from_string( self ):
        '''simple docstring'''
        path = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
        image = Image.open(path )
        agent_type = AgentImage(path )
        self.assertTrue(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
    def test_from_image( self ):
        '''simple docstring'''
        path = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
        image = Image.open(path )
        agent_type = AgentImage(image )
        self.assertFalse(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def test_from_string( self ):
        '''simple docstring'''
        string = "Hey!"
        agent_type = AgentText(string )
        self.assertEqual(string , agent_type.to_string() )
        self.assertEqual(string , agent_type.to_raw() )
        self.assertEqual(string , agent_type )
| 679 | 0 |
'''simple docstring'''
import base64


def baseaa_encode( _UpperCAmelCase : str ) -> bytes:
    return base64.b85encode(_UpperCAmelCase.encode("""utf-8""" ) )


def baseaa_decode( _UpperCAmelCase : bytes ) -> str:
    return base64.b85decode(_UpperCAmelCase ).decode("""utf-8""" )
if __name__ == "__main__":
    test = '''Hello World!'''
    encoded = baseaa_encode(test)
    print(encoded)
    decoded = baseaa_decode(encoded)
print(decoded)
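    # Round-trip sanity check: base64.b85decode inverts base64.b85encode.
    assert baseaa_decode(baseaa_encode(test)) == test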
| 506 |
'''simple docstring'''
A__: Dict = {
'''Pillow''': '''Pillow<10.0.0''',
'''accelerate''': '''accelerate>=0.20.3''',
'''av''': '''av==9.2.0''',
'''beautifulsoup4''': '''beautifulsoup4''',
'''black''': '''black~=23.1''',
'''codecarbon''': '''codecarbon==1.2.0''',
'''cookiecutter''': '''cookiecutter==1.7.3''',
'''dataclasses''': '''dataclasses''',
'''datasets''': '''datasets!=2.5.0''',
'''decord''': '''decord==0.6.0''',
'''deepspeed''': '''deepspeed>=0.9.3''',
'''diffusers''': '''diffusers''',
'''dill''': '''dill<0.3.5''',
'''evaluate''': '''evaluate>=0.2.0''',
'''fairscale''': '''fairscale>0.3''',
'''faiss-cpu''': '''faiss-cpu''',
'''fastapi''': '''fastapi''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1,<=0.7.0''',
'''ftfy''': '''ftfy''',
'''fugashi''': '''fugashi>=1.0''',
'''GitPython''': '''GitPython<3.1.19''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''ipadic''': '''ipadic>=1.0.0,<2.0''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''',
'''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''',
'''jieba''': '''jieba''',
'''kenlm''': '''kenlm''',
'''keras-nlp''': '''keras-nlp>=0.3.1''',
'''librosa''': '''librosa''',
'''nltk''': '''nltk''',
'''natten''': '''natten>=0.14.6''',
'''numpy''': '''numpy>=1.17''',
'''onnxconverter-common''': '''onnxconverter-common''',
'''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''',
'''onnxruntime''': '''onnxruntime>=1.4.0''',
'''opencv-python''': '''opencv-python''',
'''optuna''': '''optuna''',
'''optax''': '''optax>=0.0.8,<=0.1.4''',
'''packaging''': '''packaging>=20.0''',
'''parameterized''': '''parameterized''',
'''phonemizer''': '''phonemizer''',
'''protobuf''': '''protobuf''',
'''psutil''': '''psutil''',
'''pyyaml''': '''pyyaml>=5.1''',
'''pydantic''': '''pydantic<2''',
'''pytest''': '''pytest>=7.2.0''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''python''': '''python>=3.8.0''',
'''ray[tune]''': '''ray[tune]''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''',
'''rjieba''': '''rjieba''',
'''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''',
'''ruff''': '''ruff>=0.0.241,<=0.0.259''',
'''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''',
'''sacremoses''': '''sacremoses''',
'''safetensors''': '''safetensors>=0.3.1''',
'''sagemaker''': '''sagemaker>=2.31.0''',
'''scikit-learn''': '''scikit-learn''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''sigopt''': '''sigopt''',
'''starlette''': '''starlette''',
'''sudachipy''': '''sudachipy>=0.6.6''',
'''sudachidict_core''': '''sudachidict_core>=20220729''',
'''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''',
'''tensorflow''': '''tensorflow>=2.6,<2.14''',
'''tensorflow-text''': '''tensorflow-text<2.14''',
'''tf2onnx''': '''tf2onnx''',
'''timeout-decorator''': '''timeout-decorator''',
'''timm''': '''timm''',
'''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''',
'''torch''': '''torch>=1.9,!=1.12.0''',
'''torchaudio''': '''torchaudio''',
'''torchvision''': '''torchvision''',
'''pyctcdecode''': '''pyctcdecode>=0.4.0''',
'''tqdm''': '''tqdm>=4.27''',
'''unidic''': '''unidic>=1.0.2''',
'''unidic_lite''': '''unidic_lite>=1.0.7''',
'''urllib3''': '''urllib3<2.0.0''',
'''uvicorn''': '''uvicorn''',
}
| 506 | 1 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class UpperCamelCase (unittest.TestCase ):
    def setUp( self :Any ) ->Tuple:
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_distributed_data_loop.py"""] )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_ops.py"""] )
@require_multi_gpu
    def test_multi_gpu( self :Optional[Any] ) ->Tuple:
        print(f"""Found {torch.cuda.device_count()} devices.""" )
        cmd = ["""torchrun""", f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
@require_multi_gpu
    def test_multi_gpu_ops( self :Union[str, Any] ) ->Dict:
        print(f"""Found {torch.cuda.device_count()} devices.""" )
        cmd = ["""torchrun""", f"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
        print(f"""Command: {cmd}""" )
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
@require_multi_gpu
    def test_pad_across_processes( self :Optional[Any] ) ->Union[str, Any]:
        cmd = ["""torchrun""", f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
@require_multi_gpu
    def test_distributed_data_loop( self :List[str] ) ->Union[str, Any]:
        print(f"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
        cmd = ["""torchrun""", f"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1 , cuda_visible_devices="""0,1""" ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
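    # When run under torchrun (see test_pad_across_processes above), each rank
    # builds a tensor whose first dimension is process_index + 2, so padding
    # must bring every rank up to num_processes + 1 rows.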
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensora = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 264 |
"""simple docstring"""
from __future__ import annotations
from scipy.special import comb # type: ignore
class UpperCamelCase :
    def __init__( self :Any , list_of_points :list[tuple[float, float]] ):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points ) - 1
    def basis_function( self :List[str] , t :float ) ->list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values : list[float] = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i; for degree 2 this is the Bernstein basis
            # [(1 - t)**2, 2*t*(1 - t), t**2]
            output_values.append(
                comb(self.degree , i ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values ) , 5 ) == 1
        return output_values
    def bezier_curve_function( self :List[str] , t :float ) ->tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t )
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points ) ):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
    def plot_curve( self :Tuple , step_size :float = 0.01 ):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x : list[float] = []  # x coordinates of points to plot
        to_plot_y : list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size
        x_points = [i[0] for i in self.list_of_points]
        y_points = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x , to_plot_y , color="""blue""" , label="""Curve of Degree """ + str(self.degree ) , )
        plt.scatter(x_points , y_points , color="""red""" , label="""Control Points""" )
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
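    # Point-evaluation sketch (independent of the plots above): the midpoint of
    # the degree-1 curve through (1, 2) and (3, 5) averages its control points.
    assert BezierCurve([(1, 2), (3, 5)]).bezier_curve_function(0.5) == (2.0, 3.5)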
| 264 | 1 |
class Node :
    def __init__( self , name , val ):
        self.name = name
        self.val = val

    def __str__( self ):
        return F"""{self.__class__.__name__}({self.name}, {self.val})"""

    def __lt__( self , other ):
        return self.val < other.val
class MinHeap :
    def __init__( self , array ):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array )
def __getitem__( self , UpperCAmelCase__ ):
return self.get_value(UpperCAmelCase__ )
    def get_parent_idx( self , idx ):
        return (idx - 1) // 2

    def get_left_child_idx( self , idx ):
        return idx * 2 + 1

    def get_right_child_idx( self , idx ):
        return idx * 2 + 2

    def get_value( self , key ):
        return self.heap_dict[key]
    def build_heap( self , array ):
        last_idx = len(array ) - 1
        start_from = self.get_parent_idx(last_idx )
        for idx, i in enumerate(array ):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from , -1 , -1 ):
            self.sift_down(i , array )
        return array
    def sift_down( self , idx , array ):
        while True:
            l = self.get_left_child_idx(idx )  # noqa: E741
            r = self.get_right_child_idx(idx )
            smallest = idx
            if l < len(array ) and array[l] < array[idx]:
                smallest = l
            if r < len(array ) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx] , array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break
    def sift_up( self , idx ):
        p = self.get_parent_idx(idx )
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p] , self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]] , self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx )

    def peek( self ):
        return self.heap[0]

    def remove( self ):
        self.heap[0] , self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]] , self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0 , self.heap )
        return x
    def insert( self , node ):
        self.heap.append(node )
        self.idx_of_element[node] = len(self.heap ) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap ) - 1 )

    def is_empty( self ):
        return len(self.heap ) == 0

    def decrease_key( self , node , new_value ):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node] )
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
print(i)
print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 232 |
def solution( length : int = 50 ) -> int:
    """simple docstring"""
    ways_number = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
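# A Project Euler 114-style recurrence: each row length adds the all-empty
# arrangement plus, for every block of length >= 3 and every start offset, the
# arrangements of the suffix left after the block and one separating empty cell.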
if __name__ == "__main__":
print(F'''{solution() = }''')
| 232 | 1 |
def triangle_number_generator( ):
for n in range(1 , 1_00_00_00 ):
yield n * (n + 1) // 2
def count_divisors( n ):
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
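# Worked example of the prime-factor divisor count used above:
# 28 = 2**2 * 7, so count_divisors(28) = (2 + 1) * (1 + 1) = 6,
# matching the divisors {1, 2, 4, 7, 14, 28}.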
def solution( ):
    return next(i for i in triangle_number_generator() if count_divisors(i ) > 5_00 )
if __name__ == "__main__":
print(solution())
| 114 |
"""simple docstring"""
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result( ):
"""simple docstring"""
    num_nodes = 9
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    result = kruskal(num_nodes , edges )
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
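    # Both edge lists describe the same spanning tree (total weight
    # 1+2+2+4+4+7+8+9 = 37), so the comparison below is order-insensitive.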
    assert sorted(expected ) == sorted(result )
| 580 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , mobilebert_config_file , pytorch_dump_path ):
    '''simple docstring'''
    config = MobileBertConfig.from_json_file(mobilebert_config_file )
    print(F"""Building PyTorch model from configuration: {config}""" )
    model = MobileBertForPreTraining(config )
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
__lowercase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__lowercase : Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 357 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''xlm-roberta-base''': 512,
'''xlm-roberta-large''': 512,
'''xlm-roberta-large-finetuned-conll02-dutch''': 512,
'''xlm-roberta-large-finetuned-conll02-spanish''': 512,
'''xlm-roberta-large-finetuned-conll03-english''': 512,
'''xlm-roberta-large-finetuned-conll03-german''': 512,
}
class __lowercase ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs = None , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids['''<mask>'''] = len(self.sp_model ) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self ):
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        state['''sp_model_proto'''] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
@property
    def vocab_size(self ):
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
    def get_vocab(self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def _tokenize(self , text ):
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id(self , token ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self , index ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def convert_tokens_to_string(self , tokens ):
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
    def save_vocabulary(self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 357 | 1 |
def solution( n = 1_000 ) -> int:
    prev_numerator , prev_denominator = 1, 1
    result = []
    for i in range(1 , n + 1 ):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator ) ) > len(str(denominator ) ):
            result.append(i )
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result )
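# The recurrence walks the continued-fraction expansions of sqrt(2):
# 3/2, 7/5, 17/12, 41/29, ...; the eighth expansion, 1393/985, is the first
# whose numerator has more digits than its denominator.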
if __name__ == "__main__":
print(F"""{solution() = }""")
| 375 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
def replace_keys( state_dict ) -> Tuple:
    model_state_dict = {}
    state_dict.pop('pixel_mean' , None )
    state_dict.pop('pixel_std' , None )
    output_hypernetworks_mlps_pattern = r'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(output_hypernetworks_mlps_pattern , key ):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern , key ).group(2 ) )
            if layer_nb == 0:
                key = key.replace('layers.0' , 'proj_in' )
            elif layer_nb == 1:
                key = key.replace('layers.1' , 'layers.0' )
            elif layer_nb == 2:
                key = key.replace('layers.2' , 'proj_out' )
        model_state_dict[key] = value
    model_state_dict['shared_image_embedding.positional_embedding'] = model_state_dict[
        'prompt_encoder.shared_embedding.positional_embedding'
    ]
    return model_state_dict
def convert_sam_checkpoint( model_name , pytorch_dump_folder , push_to_hub , model_hub_id="ybelkada/segment-anything" ):
    checkpoint_path = hf_hub_download(model_hub_id , F"checkpoints/{model_name}.pth" )
    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
        config = SamConfig(
            vision_config=vision_config , )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
        config = SamConfig(
            vision_config=vision_config , )
    state_dict = torch.load(checkpoint_path , map_location='cpu' )
    state_dict = replace_keys(state_dict )
    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor )
    hf_model = SamModel(config )
    hf_model.load_state_dict(state_dict )
    hf_model = hf_model.to('cuda' )
    img_url = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'
    raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert('RGB' )
    input_points = [[[400, 650]]]
    input_labels = [[1]]
    inputs = processor(images=np.array(raw_image ) , return_tensors='pt' ).to('cuda' )
    with torch.no_grad():
        output = hf_model(**inputs )
    scores = output.iou_scores.squeeze()
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579_8902_5115_9668
        inputs = processor(
            images=np.array(raw_image ) , input_points=input_points , input_labels=input_labels , return_tensors='pt' ).to('cuda' )
        with torch.no_grad():
            output = hf_model(**inputs )
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712_6030_9219_3604
        input_boxes = ((75, 275, 1_725, 850),)
        inputs = processor(images=np.array(raw_image ) , input_boxes=input_boxes , return_tensors='pt' ).to('cuda' )
        with torch.no_grad():
            output = hf_model(**inputs )
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686_0156_0592_6514
        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image ) , input_points=input_points , input_labels=input_labels , return_tensors='pt' ).to('cuda' )
        with torch.no_grad():
            output = hf_model(**inputs )
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936_0477_9243_4692
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : Optional[Any] = argparse.ArgumentParser()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ['''sam_vit_b_01ec64''', '''sam_vit_h_4b8939''', '''sam_vit_l_0b3195''']
parser.add_argument(
'''--model_name''',
default='''sam_vit_h_4b8939''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
parser.add_argument(
'''--model_hub_id''',
default='''ybelkada/segment-anything''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
SCREAMING_SNAKE_CASE_ : List[Any] = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
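# Hedged usage sketch (the script filename is hypothetical; the flags come
# from the argparse definitions above):
#
#   python convert_sam_to_hf.py \
#       --model_name sam_vit_h_4b8939 \
#       --pytorch_dump_folder_path ./sam-vit-huge \
#       --push_to_hub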
| 375 | 1 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
a__ : int = 1_6
a__ : Union[str, Any] = 3_2
def _UpperCamelCase ( __A , __A = 16 , __A = "bert-base-cased" ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = AutoTokenizer.from_pretrained(__A )
UpperCamelCase__ = load_dataset("glue" , "mrpc" )
def tokenize_function(__A ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__A , max_length=__A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCamelCase__ = datasets.map(
__A , batched=__A , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=__A )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase__ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(__A ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__A , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(__A , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
UpperCamelCase__ = DataLoader(
tokenized_datasets["train"] , shuffle=__A , collate_fn=__A , batch_size=__A )
UpperCamelCase__ = DataLoader(
tokenized_datasets["validation"] , shuffle=__A , collate_fn=__A , batch_size=__A )
return train_dataloader, eval_dataloader
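# Minimal sketch (assumes network access; the checkpoint name mirrors the
# default above) of the two padding modes used in collate_fn: fixed max_length
# padding for TPU vs. dynamic "longest" padding elsewhere.
_tok = AutoTokenizer.from_pretrained("bert-base-cased")
_enc = [_tok("short"), _tok("a somewhat longer example sentence")]
print(_tok.pad(_enc, padding="max_length", max_length=128, return_tensors="pt")["input_ids"].shape)  # (2, 128)
print(_tok.pad(_enc, padding="longest", return_tensors="pt")["input_ids"].shape)  # (2, longest in batch)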
def _UpperCamelCase ( __A , __A ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase__ = config["lr"]
UpperCamelCase__ = int(config["num_epochs"] )
UpperCamelCase__ = int(config["seed"] )
UpperCamelCase__ = int(config["batch_size"] )
UpperCamelCase__ = args.model_name_or_path
set_seed(__A )
UpperCamelCase__ , UpperCamelCase__ = get_dataloaders(__A , __A , __A )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase__ = AutoModelForSequenceClassification.from_pretrained(__A , return_dict=__A )
# Instantiate optimizer
UpperCamelCase__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCamelCase__ = optimizer_cls(params=model.parameters() , lr=__A )
if accelerator.state.deepspeed_plugin is not None:
UpperCamelCase__ = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
UpperCamelCase__ = 1
UpperCamelCase__ = (len(__A ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCamelCase__ = get_linear_schedule_with_warmup(
optimizer=__A , num_warmup_steps=0 , num_training_steps=__A , )
else:
UpperCamelCase__ = DummyScheduler(__A , total_num_steps=__A , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = accelerator.prepare(
__A , __A , __A , __A , __A )
# We need to keep track of how many total steps we have iterated over
UpperCamelCase__ = 0
    # We also need to keep track of the starting epoch so files are named properly
UpperCamelCase__ = 0
# Now we train the model
UpperCamelCase__ = evaluate.load("glue" , "mrpc" )
UpperCamelCase__ = 0
UpperCamelCase__ = {}
for epoch in range(__A , __A ):
model.train()
for step, batch in enumerate(__A ):
UpperCamelCase__ = model(**__A )
UpperCamelCase__ = outputs.loss
UpperCamelCase__ = loss / gradient_accumulation_steps
accelerator.backward(__A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
UpperCamelCase__ = 0
for step, batch in enumerate(__A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase__ = model(**__A )
UpperCamelCase__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
UpperCamelCase__ , UpperCamelCase__ = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__A ) - 1:
UpperCamelCase__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCamelCase__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__A , references=__A , )
UpperCamelCase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , __A )
UpperCamelCase__ = eval_metric["accuracy"]
if best_performance < eval_metric["accuracy"]:
UpperCamelCase__ = eval_metric["accuracy"]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f:
json.dump(__A , __A )
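# Distilled sketch of the optimizer selection above: a real AdamW is used only
# when the DeepSpeed config does not already define an optimizer, otherwise
# Accelerate's DummyOptim placeholder. `ds_config` is a hypothetical plain
# dict here; AdamW/DummyOptim reuse the imports at the top of this file.
def _pick_optimizer_cls(ds_config):
    if ds_config is None or "optimizer" not in ds_config:
        return AdamW
    return DummyOptim

assert _pick_optimizer_cls(None) is AdamW
assert _pick_optimizer_cls({"optimizer": {"type": "AdamW"}}) is DummyOptim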
def _UpperCamelCase ( ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=__A , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__A , )
parser.add_argument(
"--output_dir" , type=__A , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--performance_lower_bound" , type=__A , default=__A , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , )
parser.add_argument(
"--num_epochs" , type=__A , default=3 , help="Number of train epochs." , )
UpperCamelCase__ = parser.parse_args()
UpperCamelCase__ = {"lr": 2E-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(__A , __A )
if __name__ == "__main__":
main()
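# Hedged launch sketch (script and config filenames are hypothetical; the
# flags come from the parser above):
#
#   accelerate launch --config_file ds_zero2.yaml train_mrpc.py \
#       --model_name_or_path bert-base-cased --num_epochs 3 --output_dir ./out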
| 721 |
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
a__ : List[Any] = logging.get_logger(__name__)
def _UpperCamelCase ( __A , __A , __A , __A ) -> Tuple[int, int]:
'''simple docstring'''
def constraint_to_multiple_of(__A , __A , __A=0 , __A=None ):
UpperCamelCase__ = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
UpperCamelCase__ = math.floor(val / multiple ) * multiple
if x < min_val:
UpperCamelCase__ = math.ceil(val / multiple ) * multiple
return x
UpperCamelCase__ = (output_size, output_size) if isinstance(__A , __A ) else output_size
UpperCamelCase__ , UpperCamelCase__ = get_image_size(__A )
UpperCamelCase__ , UpperCamelCase__ = output_size
# determine new height and width
UpperCamelCase__ = output_height / input_height
UpperCamelCase__ = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
UpperCamelCase__ = scale_width
else:
# fit height
UpperCamelCase__ = scale_height
UpperCamelCase__ = constraint_to_multiple_of(scale_height * input_height , multiple=__A )
UpperCamelCase__ = constraint_to_multiple_of(scale_width * input_width , multiple=__A )
return (new_height, new_width)
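# Worked example of the sizing rule above (illustrative numbers): input
# (h=480, w=640), target (384, 384), keep_aspect_ratio=True, multiple=32.
# scale_h = 0.8 and scale_w = 0.6; |1 - 0.8| < |1 - 0.6|, so the height fit
# wins and both sides are scaled by 0.8 before snapping to the multiple.
_scale = 384 / 480
assert round(_scale * 480 / 32) * 32 == 384
assert round(_scale * 640 / 32) * 32 == 512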
class lowercase_ ( a__ ):
__UpperCAmelCase = ['pixel_values']
def __init__( self , a = True , a = None , a = PILImageResampling.BILINEAR , a = False , a = 1 , a = True , a = 1 / 2_55 , a = True , a = None , a = None , **a , ):
super().__init__(**a )
UpperCamelCase__ = size if size is not None else {"height": 3_84, "width": 3_84}
UpperCamelCase__ = get_size_dict(a )
UpperCamelCase__ = do_resize
UpperCamelCase__ = size
UpperCamelCase__ = keep_aspect_ratio
UpperCamelCase__ = ensure_multiple_of
UpperCamelCase__ = resample
UpperCamelCase__ = do_rescale
UpperCamelCase__ = rescale_factor
UpperCamelCase__ = do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __a ( self , a , a , a = False , a = 1 , a = PILImageResampling.BICUBIC , a = None , **a , ):
UpperCamelCase__ = get_size_dict(a )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
UpperCamelCase__ = get_resize_output_image_size(
a , output_size=(size["height"], size["width"]) , keep_aspect_ratio=a , multiple=a , )
return resize(a , size=a , resample=a , data_format=a , **a )
def __a ( self , a , a , a = None , **a , ):
return rescale(a , scale=a , data_format=a , **a )
def __a ( self , a , a , a , a = None , **a , ):
return normalize(a , mean=a , std=a , data_format=a , **a )
def __a ( self , a , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = ChannelDimension.FIRST , **a , ):
UpperCamelCase__ = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ = size if size is not None else self.size
UpperCamelCase__ = get_size_dict(a )
UpperCamelCase__ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
UpperCamelCase__ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
UpperCamelCase__ = resample if resample is not None else self.resample
UpperCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase__ = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else self.image_mean
UpperCamelCase__ = image_std if image_std is not None else self.image_std
UpperCamelCase__ = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCamelCase__ = [to_numpy_array(a ) for image in images]
if do_resize:
UpperCamelCase__ = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_rescale:
UpperCamelCase__ = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
UpperCamelCase__ = [self.normalize(image=a , mean=a , std=a ) for image in images]
UpperCamelCase__ = [to_channel_dimension_format(a , a ) for image in images]
UpperCamelCase__ = {"pixel_values": images}
return BatchFeature(data=a , tensor_type=a )
def __a ( self , a , a = None ):
UpperCamelCase__ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(a ) != len(a ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(a ):
UpperCamelCase__ = target_sizes.numpy()
UpperCamelCase__ = []
for idx in range(len(a ) ):
UpperCamelCase__ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=a )
UpperCamelCase__ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(a )
else:
UpperCamelCase__ = logits.argmax(dim=1 )
UpperCamelCase__ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
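# Hedged shape check mirroring the post-processing above: random logits and
# illustrative sizes only.
import torch as _torch

_logits = _torch.randn(1, 150, 24, 24)  # (batch, num_classes, height, width)
_resized = _torch.nn.functional.interpolate(_logits, size=(480, 640), mode="bilinear", align_corners=False)
print(_resized[0].argmax(dim=0).shape)  # torch.Size([480, 640])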
| 223 | 0 |
from __future__ import annotations
import time
import numpy as np
snake_case : Union[str, Any] = [8, 5, 9, 7]
snake_case : Optional[int] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
snake_case : Union[str, Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class __lowercase :
"""simple docstring"""
def __init__( self , A_ , A_ , A_ , )-> None:
_SCREAMING_SNAKE_CASE = claim_vector
_SCREAMING_SNAKE_CASE = allocated_resources_table
_SCREAMING_SNAKE_CASE = maximum_claim_table
def __magic_name__ ( self )-> list[int]:
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __magic_name__ ( self )-> list[int]:
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __magic_name__ ( self )-> list[list[int]]:
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(A_ ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __magic_name__ ( self )-> dict[int, list[int]]:
return {self.__need().index(A_ ): i for i in self.__need()}
def __magic_name__ ( self , **A_ )-> None:
_SCREAMING_SNAKE_CASE = self.__need()
_SCREAMING_SNAKE_CASE = self.__allocated_resources_table
_SCREAMING_SNAKE_CASE = self.__available_resources()
_SCREAMING_SNAKE_CASE = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('_' * 50 + '\n' )
while need_list:
_SCREAMING_SNAKE_CASE = False
for each_need in need_list:
_SCREAMING_SNAKE_CASE = True
for index, need in enumerate(A_ ):
if need > available_resources[index]:
_SCREAMING_SNAKE_CASE = False
break
if execution:
_SCREAMING_SNAKE_CASE = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
_SCREAMING_SNAKE_CASE = original_need_index
print(F'''Process {process_number + 1} is executing.''' )
# remove the process run from stack
need_list.remove(A_ )
# update available/freed resources stack
_SCREAMING_SNAKE_CASE = np.array(A_ ) + np.array(
alloc_resources_table[process_number] )
print(
'Updated available resource stack for processes: '
+ ' '.join([str(A_ ) for x in available_resources] ) )
break
if safe:
print('The process is in a safe state.\n' )
else:
print('System in unsafe state. Aborting...\n' )
break
def __magic_name__ ( self )-> str:
print(' ' * 9 + 'Allocated Resource Table' )
for item in self.__allocated_resources_table:
print(
F'''P{self.__allocated_resources_table.index(A_ ) + 1}'''
+ ' '.join(F'''{it:>8}''' for it in item )
+ '\n' )
print(' ' * 9 + 'System Resource Table' )
for item in self.__maximum_claim_table:
print(
F'''P{self.__maximum_claim_table.index(A_ ) + 1}'''
+ ' '.join(F'''{it:>8}''' for it in item )
+ '\n' )
print(
'Current Usage by Active Processes: '
+ ' '.join(str(A_ ) for x in self.__claim_vector ) )
print(
'Initial Available Resources: '
+ ' '.join(str(A_ ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
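# Hedged sketch of the "need" computation above (maximum claim minus current
# allocation), reusing the example tables defined at the top of this file:
import numpy as _np

_alloc = _np.array([[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0]])
_maxc = _np.array([[3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3]])
print((_maxc - _alloc).tolist())  # outstanding need per process and resource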
| 605 |
from __future__ import annotations
from typing import Any
def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ):
"""simple docstring"""
if not postfix_notation:
return 0
_SCREAMING_SNAKE_CASE = {'+', '-', '*', '/'}
_SCREAMING_SNAKE_CASE = []
for token in postfix_notation:
if token in operations:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(UpperCAmelCase__ ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
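# Compact reference evaluator for the algorithm above (the name is
# hypothetical; the mangled variables make the pop order ambiguous in the
# original). Note the top of the stack is the *second* operand.
def _eval_postfix(tokens):
    stack = []
    for tok in tokens:
        if tok in {"+", "-", "*", "/"}:
            b, a = stack.pop(), stack.pop()  # b was pushed last
            if tok == "+":
                stack.append(a + b)
            elif tok == "-":
                stack.append(a - b)
            elif tok == "*":
                stack.append(a * b)
            else:
                stack.append(int(a / b))  # truncates toward zero, like the branch above
        else:
            stack.append(int(tok))
    return stack.pop()

assert _eval_postfix(["2", "1", "+", "3", "*"]) == 9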
| 605 | 1 |
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase ( snake_case__ ):
'''simple docstring'''
A = (KDPMaDiscreteScheduler,)
A = 1_0
def lowerCamelCase__ ( self :Tuple , **lowerCamelCase_ :Dict ) -> int:
"""simple docstring"""
UpperCamelCase__ = {
"num_train_timesteps": 1_1_0_0,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**lowerCamelCase_ )
return config
def lowerCamelCase__ ( self :Optional[int] ) -> List[str]:
"""simple docstring"""
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def lowerCamelCase__ ( self :Optional[int] ) -> str:
"""simple docstring"""
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCamelCase_ , beta_end=lowerCamelCase_ )
def lowerCamelCase__ ( self :Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCamelCase_ )
def lowerCamelCase__ ( self :Any ) -> Optional[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase_ )
def lowerCamelCase__ ( self :Dict ) -> str:
"""simple docstring"""
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config(prediction_type="v_prediction" )
UpperCamelCase__ = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(self.num_inference_steps )
UpperCamelCase__ = self.dummy_model()
UpperCamelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCamelCase__ = sample.to(lowerCamelCase_ )
for i, t in enumerate(scheduler.timesteps ):
UpperCamelCase__ = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase__ = model(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase__ = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase__ = output.prev_sample
UpperCamelCase__ = torch.sum(torch.abs(lowerCamelCase_ ) )
UpperCamelCase__ = torch.mean(torch.abs(lowerCamelCase_ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934e-07 ) < 1e-2
assert abs(result_mean.item() - 6.1112e-10 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.693428650170972e-07 ) < 1e-2
assert abs(result_mean.item() - 0.0_002 ) < 1e-3
def lowerCamelCase__ ( self :Tuple ) -> Union[str, Any]:
"""simple docstring"""
if torch_device == "mps":
return
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(self.num_inference_steps )
UpperCamelCase__ = self.dummy_model()
UpperCamelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCamelCase__ = sample.to(lowerCamelCase_ )
for i, t in enumerate(scheduler.timesteps ):
UpperCamelCase__ = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase__ = model(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase__ = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase__ = output.prev_sample
UpperCamelCase__ = torch.sum(torch.abs(lowerCamelCase_ ) )
UpperCamelCase__ = torch.mean(torch.abs(lowerCamelCase_ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
def lowerCamelCase__ ( self :Union[str, Any] ) -> Any:
"""simple docstring"""
if torch_device == "mps":
return
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase_ )
UpperCamelCase__ = self.dummy_model()
UpperCamelCase__ = self.dummy_sample_deter.to(lowerCamelCase_ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
UpperCamelCase__ = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase__ = model(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase__ = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase__ = output.prev_sample
UpperCamelCase__ = torch.sum(torch.abs(lowerCamelCase_ ) )
UpperCamelCase__ = torch.mean(torch.abs(lowerCamelCase_ ) )
if str(lowerCamelCase_ ).startswith("cpu" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
        assert abs(result_mean.item() - 0.0_266 ) < 1e-3
| 304 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase ( snake_case__ ):
'''simple docstring'''
def __init__( self :int , lowerCamelCase_ :TransformeraDModel , lowerCamelCase_ :AutoencoderKL , lowerCamelCase_ :KarrasDiffusionSchedulers , lowerCamelCase_ :Optional[Dict[int, str]] = None , ) -> int:
"""simple docstring"""
super().__init__()
self.register_modules(transformer=lowerCamelCase_ , vae=lowerCamelCase_ , scheduler=lowerCamelCase_ )
# create a imagenet -> id dictionary for easier use
UpperCamelCase__ = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split("," ):
UpperCamelCase__ = int(lowerCamelCase_ )
UpperCamelCase__ = dict(sorted(self.labels.items() ) )
def lowerCamelCase__ ( self :Tuple , lowerCamelCase_ :Union[str, List[str]] ) -> List[int]:
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
UpperCamelCase__ = list(lowerCamelCase_ )
for l in label:
if l not in self.labels:
raise ValueError(
f'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self :Optional[int] , lowerCamelCase_ :List[int] , lowerCamelCase_ :float = 4.0 , lowerCamelCase_ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase_ :int = 5_0 , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
UpperCamelCase__ = len(lowerCamelCase_ )
UpperCamelCase__ = self.transformer.config.sample_size
UpperCamelCase__ = self.transformer.config.in_channels
UpperCamelCase__ = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowerCamelCase_ , device=self.device , dtype=self.transformer.dtype , )
UpperCamelCase__ = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
UpperCamelCase__ = torch.tensor(lowerCamelCase_ , device=self.device ).reshape(-1 )
UpperCamelCase__ = torch.tensor([1_0_0_0] * batch_size , device=self.device )
UpperCamelCase__ = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowerCamelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
UpperCamelCase__ = latent_model_input[: len(lowerCamelCase_ ) // 2]
UpperCamelCase__ = torch.cat([half, half] , dim=0 )
UpperCamelCase__ = self.scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase__ = t
if not torch.is_tensor(lowerCamelCase_ ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
UpperCamelCase__ = latent_model_input.device.type == "mps"
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
UpperCamelCase__ = torch.floataa if is_mps else torch.floataa
else:
UpperCamelCase__ = torch.intaa if is_mps else torch.intaa
UpperCamelCase__ = torch.tensor([timesteps] , dtype=lowerCamelCase_ , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
UpperCamelCase__ = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
UpperCamelCase__ = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
UpperCamelCase__ = self.transformer(
lowerCamelCase_ , timestep=lowerCamelCase_ , class_labels=lowerCamelCase_ ).sample
# perform guidance
if guidance_scale > 1:
UpperCamelCase__ , UpperCamelCase__ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
UpperCamelCase__ , UpperCamelCase__ = torch.split(lowerCamelCase_ , len(lowerCamelCase_ ) // 2 , dim=0 )
UpperCamelCase__ = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
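                # worked numbers for the guidance line above (illustrative only):
                # uncond_eps = 0.2, cond_eps = 0.5, guidance_scale = 4.0 gives
                # 0.2 + 4.0 * (0.5 - 0.2) = 1.4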
UpperCamelCase__ = torch.cat([half_eps, half_eps] , dim=0 )
UpperCamelCase__ = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
UpperCamelCase__ , UpperCamelCase__ = torch.split(lowerCamelCase_ , lowerCamelCase_ , dim=1 )
else:
UpperCamelCase__ = noise_pred
# compute previous image: x_t -> x_t-1
UpperCamelCase__ = self.scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
if guidance_scale > 1:
UpperCamelCase__ , UpperCamelCase__ = latent_model_input.chunk(2 , dim=0 )
else:
UpperCamelCase__ = latent_model_input
UpperCamelCase__ = 1 / self.vae.config.scaling_factor * latents
UpperCamelCase__ = self.vae.decode(lowerCamelCase_ ).sample
UpperCamelCase__ = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCamelCase__ = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCamelCase__ = self.numpy_to_pil(lowerCamelCase_ )
if not return_dict:
return (samples,)
        return ImagePipelineOutput(images=lowerCamelCase_ )
| 304 | 1 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class UpperCAmelCase ( __UpperCAmelCase ):
@require_torch
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
UpperCamelCase = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
UpperCamelCase = '''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
UpperCamelCase = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(_lowerCamelCase )
BertModel.from_pretrained(_lowerCamelCase )
BertTokenizer.from_pretrained(_lowerCamelCase )
pipeline(task="""fill-mask""" , model=_lowerCamelCase )
# baseline - just load from_pretrained with normal network
UpperCamelCase = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
UpperCamelCase = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCamelCase = '''1'''
UpperCamelCase = subprocess.run(_lowerCamelCase , env=_lowerCamelCase , check=_lowerCamelCase , capture_output=_lowerCamelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
UpperCamelCase = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
UpperCamelCase = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
UpperCamelCase = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(_lowerCamelCase )
BertModel.from_pretrained(_lowerCamelCase )
BertTokenizer.from_pretrained(_lowerCamelCase )
pipeline(task="""fill-mask""" , model=_lowerCamelCase )
# baseline - just load from_pretrained with normal network
UpperCamelCase = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
UpperCamelCase = self.get_env()
UpperCamelCase = subprocess.run(_lowerCamelCase , env=_lowerCamelCase , check=_lowerCamelCase , capture_output=_lowerCamelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = '''
from transformers import BertConfig, BertModel, BertTokenizer
'''
UpperCamelCase = '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
UpperCamelCase = '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
UpperCamelCase = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
UpperCamelCase = self.get_env()
UpperCamelCase = subprocess.run(_lowerCamelCase , env=_lowerCamelCase , check=_lowerCamelCase , capture_output=_lowerCamelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
# next emulate no network
UpperCamelCase = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCamelCase = '''1'''
UpperCamelCase = subprocess.run(_lowerCamelCase , env=_lowerCamelCase , check=_lowerCamelCase , capture_output=_lowerCamelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = '''
from transformers import pipeline
'''
UpperCamelCase = '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
UpperCamelCase = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
UpperCamelCase = self.get_env()
UpperCamelCase = '''1'''
UpperCamelCase = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
UpperCamelCase = subprocess.run(_lowerCamelCase , env=_lowerCamelCase , check=_lowerCamelCase , capture_output=_lowerCamelCase )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
"""You cannot infer task automatically within `pipeline` when using offline mode""" , result.stderr.decode().replace("""\n""" , """""" ) , )
@require_torch
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = '''
from transformers import AutoModel
'''
UpperCamelCase = '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
UpperCamelCase = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
UpperCamelCase = self.get_env()
UpperCamelCase = subprocess.run(_lowerCamelCase , env=_lowerCamelCase , check=_lowerCamelCase , capture_output=_lowerCamelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCamelCase = '''1'''
UpperCamelCase = subprocess.run(_lowerCamelCase , env=_lowerCamelCase , check=_lowerCamelCase , capture_output=_lowerCamelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
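# Distilled sketch of the offline trick injected into the subprocesses above:
# monkeypatching socket.socket so that any network attempt raises.
import socket as _socket

def _offline_socket(*args, **kwargs):
    raise RuntimeError("Offline mode is enabled, network access is blocked")

# _socket.socket = _offline_socket  # uncomment to actually block this process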
| 386 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class snake_case ( __UpperCAmelCase ):
lowerCamelCase__ = '''roc_bert'''
def __init__( self :Union[str, Any] , _lowerCamelCase :Any=3_0_5_2_2 , _lowerCamelCase :str=7_6_8 , _lowerCamelCase :Optional[Any]=1_2 , _lowerCamelCase :List[str]=1_2 , _lowerCamelCase :str=3_0_7_2 , _lowerCamelCase :Tuple="gelu" , _lowerCamelCase :List[Any]=0.1 , _lowerCamelCase :List[str]=0.1 , _lowerCamelCase :Optional[int]=5_1_2 , _lowerCamelCase :Dict=2 , _lowerCamelCase :Any=0.0_2 , _lowerCamelCase :Optional[int]=1e-12 , _lowerCamelCase :str=True , _lowerCamelCase :Any=0 , _lowerCamelCase :List[str]="absolute" , _lowerCamelCase :List[Any]=None , _lowerCamelCase :Any=True , _lowerCamelCase :Union[str, Any]=True , _lowerCamelCase :str=7_6_8 , _lowerCamelCase :Union[str, Any]=9_1_0 , _lowerCamelCase :List[Any]=5_1_2 , _lowerCamelCase :Optional[int]=2_4_8_5_8 , _lowerCamelCase :Union[str, Any]=True , **_lowerCamelCase :str , ):
__SCREAMING_SNAKE_CASE : List[str] = vocab_size
__SCREAMING_SNAKE_CASE : int = max_position_embeddings
__SCREAMING_SNAKE_CASE : List[str] = hidden_size
__SCREAMING_SNAKE_CASE : str = num_hidden_layers
__SCREAMING_SNAKE_CASE : int = num_attention_heads
__SCREAMING_SNAKE_CASE : Any = intermediate_size
__SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
__SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
__SCREAMING_SNAKE_CASE : Union[str, Any] = type_vocab_size
__SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
__SCREAMING_SNAKE_CASE : Optional[int] = use_cache
__SCREAMING_SNAKE_CASE : str = enable_pronunciation
__SCREAMING_SNAKE_CASE : List[str] = enable_shape
__SCREAMING_SNAKE_CASE : Tuple = pronunciation_embed_dim
__SCREAMING_SNAKE_CASE : Optional[Any] = pronunciation_vocab_size
__SCREAMING_SNAKE_CASE : str = shape_embed_dim
__SCREAMING_SNAKE_CASE : Union[str, Any] = shape_vocab_size
__SCREAMING_SNAKE_CASE : Tuple = concat_input
__SCREAMING_SNAKE_CASE : Union[str, Any] = position_embedding_type
__SCREAMING_SNAKE_CASE : str = classifier_dropout
super().__init__(pad_token_id=_lowerCamelCase , **_lowerCamelCase )
| 674 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class lowerCAmelCase__ ( __lowercase ):
UpperCamelCase_ : Optional[int] = "swinv2"
UpperCamelCase_ : Union[str, Any] = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , a=2_24 , a=4 , a=3 , a=96 , a=[2, 2, 6, 2] , a=[3, 6, 12, 24] , a=7 , a=4.0 , a=True , a=0.0 , a=0.0 , a=0.1 , a="gelu" , a=False , a=0.02 , a=1e-5 , a=32 , **a , ) -> Any:
'''simple docstring'''
super().__init__(**a )
_UpperCamelCase = image_size
_UpperCamelCase = patch_size
_UpperCamelCase = num_channels
_UpperCamelCase = embed_dim
_UpperCamelCase = depths
_UpperCamelCase = len(a )
_UpperCamelCase = num_heads
_UpperCamelCase = window_size
_UpperCamelCase = mlp_ratio
_UpperCamelCase = qkv_bias
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = drop_path_rate
_UpperCamelCase = hidden_act
_UpperCamelCase = use_absolute_embeddings
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = initializer_range
_UpperCamelCase = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_UpperCamelCase = int(embed_dim * 2 ** (len(a ) - 1) )
_UpperCamelCase = (0, 0, 0, 0)
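# Worked check of the derived channel dimension above: embed_dim=96 with
# depths=[2, 2, 6, 2] (four stages) gives hidden_size = 96 * 2**(4 - 1) = 768.
assert 96 * 2 ** (len([2, 2, 6, 2]) - 1) == 768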
| 202 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
UpperCamelCase_ : List[Any] = StableDiffusionInpaintPipeline
UpperCamelCase_ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
UpperCamelCase_ : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCamelCase_ : Tuple = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCamelCase_ : int = frozenset([] )
def A_ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a , )
_UpperCamelCase = PNDMScheduler(skip_prk_steps=a )
torch.manual_seed(0 )
_UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
_UpperCamelCase = CLIPTextModel(a )
_UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_UpperCamelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def A_ ( self , a , a=0 ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(a ) ).to(a )
_UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCamelCase = Image.fromarray(np.uinta(a ) ).convert("""RGB""" ).resize((64, 64) )
_UpperCamelCase = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((64, 64) )
if str(a ).startswith("""mps""" ):
_UpperCamelCase = torch.manual_seed(a )
else:
_UpperCamelCase = torch.Generator(device=a ).manual_seed(a )
_UpperCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def A_ ( self ) -> Dict:
'''simple docstring'''
_UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = StableDiffusionInpaintPipeline(**a )
_UpperCamelCase = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
_UpperCamelCase = self.get_dummy_inputs(a )
_UpperCamelCase = sd_pipe(**a ).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A_ ( self ) -> Tuple:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def A_ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self ) -> Dict:
'''simple docstring'''
_UpperCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
_UpperCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
_UpperCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
_UpperCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
_UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(a , safety_checker=a )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
pipe.enable_attention_slicing()
_UpperCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = pipe(
prompt=a , image=a , mask_image=a , generator=a , output_type="""np""" , )
_UpperCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def A_ ( self ) -> str:
'''simple docstring'''
_UpperCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
_UpperCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
_UpperCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
_UpperCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
_UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
a , torch_dtype=torch.floataa , safety_checker=a , )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
pipe.enable_attention_slicing()
_UpperCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = pipe(
prompt=a , image=a , mask_image=a , generator=a , output_type="""np""" , )
_UpperCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def A_ ( self ) -> Dict:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
_UpperCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
_UpperCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
_UpperCamelCase = PNDMScheduler.from_pretrained(a , subfolder="""scheduler""" )
_UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
a , safety_checker=a , scheduler=a , torch_dtype=torch.floataa , )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = pipe(
prompt=a , image=a , mask_image=a , generator=a , num_inference_steps=2 , output_type="""np""" , )
_UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
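# Distilled sketch of the peak-memory measurement used above (only meaningful
# on a CUDA device; the workload itself is elided here).
import torch as _torch

if _torch.cuda.is_available():
    _torch.cuda.empty_cache()
    _torch.cuda.reset_peak_memory_stats()
    # ... run a pipeline or model here ...
    print(_torch.cuda.max_memory_allocated())  # peak bytes allocated since the reset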
| 202 | 1 |
from math import pi, sqrt, tan
def A__ ( snake_case_ : float ):
if side_length < 0:
raise ValueError('''surface_area_cube() only accepts non-negative values''' )
return 6 * side_length**2
def A__ ( snake_case_ : float , snake_case_ : float , snake_case_ : float ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('''surface_area_cuboid() only accepts non-negative values''' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def A__ ( snake_case_ : float ):
if radius < 0:
raise ValueError('''surface_area_sphere() only accepts non-negative values''' )
return 4 * pi * radius**2
def A__ ( snake_case_ : float ):
if radius < 0:
raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' )
return 3 * pi * radius**2
def A__ ( snake_case_ : float , snake_case_ : float ):
if radius < 0 or height < 0:
raise ValueError('''surface_area_cone() only accepts non-negative values''' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def A__ ( snake_case_ : float , snake_case_ : float , snake_case_ : float ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'''surface_area_conical_frustum() only accepts non-negative values''' )
SCREAMING_SNAKE_CASE__: Optional[Any]= (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def A__ ( snake_case_ : float , snake_case_ : float ):
if radius < 0 or height < 0:
raise ValueError('''surface_area_cylinder() only accepts non-negative values''' )
return 2 * pi * radius * (height + radius)
def A__ ( snake_case_ : float , snake_case_ : float ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('''surface_area_torus() only accepts non-negative values''' )
if torus_radius < tube_radius:
raise ValueError(
'''surface_area_torus() does not support spindle or self intersecting tori''' )
return 4 * pow(snake_case_ , 2 ) * torus_radius * tube_radius
def A__ ( snake_case_ : float , snake_case_ : float ):
if length < 0 or width < 0:
raise ValueError('''area_rectangle() only accepts non-negative values''' )
return length * width
def A__ ( snake_case_ : float ):
if side_length < 0:
raise ValueError('''area_square() only accepts non-negative values''' )
return side_length**2
def A__ ( snake_case_ : float , snake_case_ : float ):
if base < 0 or height < 0:
raise ValueError('''area_triangle() only accepts non-negative values''' )
return (base * height) / 2
def A__ ( snake_case_ : float , snake_case_ : float , snake_case_ : float ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('''Given three sides do not form a triangle''' )
SCREAMING_SNAKE_CASE__: Dict= (sidea + sidea + sidea) / 2
SCREAMING_SNAKE_CASE__: Dict= sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
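# Worked Heron example for the function above: sides 5, 12, 13 give the
# semi-perimeter s = 15 and area = sqrt(15 * 10 * 3 * 2) = sqrt(900) = 30.
assert sqrt(15 * 10 * 3 * 2) == 30.0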
def A__ ( snake_case_ : float , snake_case_ : float ):
if base < 0 or height < 0:
raise ValueError('''area_parallelogram() only accepts non-negative values''' )
return base * height
def A__ ( snake_case_ : float , snake_case_ : float , snake_case_ : float ):
if basea < 0 or basea < 0 or height < 0:
raise ValueError('''area_trapezium() only accepts non-negative values''' )
return 1 / 2 * (basea + basea) * height
def A__ ( snake_case_ : float ):
if radius < 0:
raise ValueError('''area_circle() only accepts non-negative values''' )
return pi * radius**2
def A__ ( snake_case_ : float , snake_case_ : float ):
if radius_x < 0 or radius_y < 0:
raise ValueError('''area_ellipse() only accepts non-negative values''' )
return pi * radius_x * radius_y
def A__ ( snake_case_ : float , snake_case_ : float ):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('''area_rhombus() only accepts non-negative values''' )
return 1 / 2 * diagonal_a * diagonal_a
def A__ ( snake_case_ : int , snake_case_ : float ):
if not isinstance(snake_case_ , snake_case_ ) or sides < 3:
raise ValueError(
'''area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides''' )
elif length < 0:
raise ValueError(
'''area_reg_polygon() only accepts non-negative values as \
length of a side''' )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(f'''Rectangle: {area_rectangle(1_0, 2_0) = }''')
print(f'''Square: {area_square(1_0) = }''')
print(f'''Triangle: {area_triangle(1_0, 1_0) = }''')
print(f'''Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }''')
print(f'''Parallelogram: {area_parallelogram(1_0, 2_0) = }''')
print(f'''Rhombus: {area_rhombus(1_0, 2_0) = }''')
print(f'''Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }''')
print(f'''Circle: {area_circle(2_0) = }''')
print(f'''Ellipse: {area_ellipse(1_0, 2_0) = }''')
print('\nSurface Areas of various geometric shapes: \n')
print(f'''Cube: {surface_area_cube(2_0) = }''')
print(f'''Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }''')
print(f'''Sphere: {surface_area_sphere(2_0) = }''')
print(f'''Hemisphere: {surface_area_hemisphere(2_0) = }''')
print(f'''Cone: {surface_area_cone(1_0, 2_0) = }''')
print(f'''Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }''')
print(f'''Cylinder: {surface_area_cylinder(1_0, 2_0) = }''')
print(f'''Torus: {surface_area_torus(2_0, 1_0) = }''')
print(f'''Equilateral Triangle: {area_reg_polygon(3, 1_0) = }''')
print(f'''Square: {area_reg_polygon(4, 1_0) = }''')
    print(f'''Regular Pentagon: {area_reg_polygon(5, 1_0) = }''')
| 64 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase : Tuple = logging.get_logger(__name__)
lowercase : Dict = {
"""ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""",
}
class _a (a__ ):
'''simple docstring'''
lowerCAmelCase_ : str = """deta"""
lowerCAmelCase_ : Any = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self ,__a=None ,__a=900 ,__a=2_048 ,__a=6 ,__a=2_048 ,__a=8 ,__a=6 ,__a=1_024 ,__a=8 ,__a=0.0 ,__a=True ,__a="relu" ,__a=256 ,__a=0.1 ,__a=0.0 ,__a=0.0 ,__a=0.02 ,__a=1.0 ,__a=True ,__a=False ,__a="sine" ,__a=5 ,__a=4 ,__a=4 ,__a=True ,__a=300 ,__a=True ,__a=True ,__a=1 ,__a=5 ,__a=2 ,__a=1 ,__a=1 ,__a=5 ,__a=2 ,__a=0.1 ,__a=0.25 ,**__a ,) -> int:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
snake_case : int = CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] )
else:
if isinstance(__a ,__a ):
snake_case : str = backbone_config.pop("""model_type""" )
snake_case : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
snake_case : List[Any] = config_class.from_dict(__a )
snake_case : Any = backbone_config
snake_case : Optional[Any] = num_queries
snake_case : Dict = max_position_embeddings
snake_case : int = d_model
snake_case : str = encoder_ffn_dim
snake_case : Dict = encoder_layers
snake_case : List[Any] = encoder_attention_heads
snake_case : int = decoder_ffn_dim
snake_case : List[Any] = decoder_layers
snake_case : Dict = decoder_attention_heads
snake_case : Dict = dropout
snake_case : Dict = attention_dropout
snake_case : List[Any] = activation_dropout
snake_case : Dict = activation_function
snake_case : List[Any] = init_std
snake_case : Any = init_xavier_std
snake_case : List[Any] = encoder_layerdrop
snake_case : Optional[Any] = auxiliary_loss
snake_case : Optional[int] = position_embedding_type
# deformable attributes
snake_case : Tuple = num_feature_levels
snake_case : List[Any] = encoder_n_points
snake_case : Dict = decoder_n_points
snake_case : int = two_stage
snake_case : Any = two_stage_num_proposals
snake_case : List[str] = with_box_refine
snake_case : Tuple = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
snake_case : Optional[int] = class_cost
snake_case : int = bbox_cost
snake_case : Optional[int] = giou_cost
# Loss coefficients
snake_case : str = mask_loss_coefficient
snake_case : Any = dice_loss_coefficient
snake_case : List[Any] = bbox_loss_coefficient
snake_case : List[Any] = giou_loss_coefficient
snake_case : List[str] = eos_coefficient
snake_case : Optional[Any] = focal_alpha
super().__init__(is_encoder_decoder=__a ,**__a )
@property
def snake_case_ ( self ) -> int:
return self.encoder_attention_heads
@property
def snake_case_ ( self ) -> int:
return self.d_model
def snake_case_ ( self ) -> List[str]:
snake_case : List[str] = copy.deepcopy(self.__dict__ )
snake_case : List[str] = self.backbone_config.to_dict()
snake_case : List[Any] = self.__class__.model_type
return output
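# Distilled sketch of the nested-config dispatch above: a plain dict backbone
# config is routed through CONFIG_MAPPING (imported at the top of this file)
# by its "model_type" key. The values below are illustrative only.
_cfg_dict = {"model_type": "resnet", "out_features": ["stage2", "stage3", "stage4"]}
_model_type = _cfg_dict.pop("model_type")
_backbone_config = CONFIG_MAPPING[_model_type].from_dict(_cfg_dict)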
| 116 | 0 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file( ) -> str:
    parser = argparse.ArgumentParser()
    parser.add_argument("""-f""" )
    args = parser.parse_args()
return args.f
def get_results( A__ ) -> dict:
    results = {}
    path = os.path.join(A__ , """all_results.json""" )
    if os.path.exists(path ):
        with open(path , """r""" ) as f:
            results = json.load(f )
    else:
        raise ValueError(f'''can\'t find {path}''' )
return results
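# Illustration of the helper above (values hypothetical): after an example
# script finishes, `<output_dir>/all_results.json` might contain
# {"eval_accuracy": 0.78, "train_loss": 0.31}; `get_results(output_dir)`
# simply returns that dictionary, or raises if the file is missing.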
def is_cuda_and_apex_available( ) -> bool:
    is_using_cuda = torch.cuda.is_available() and torch_device == """cuda"""
return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
@classmethod
def __lowerCamelCase( cls ):
"""simple docstring"""
_snake_case : Union[str, Any] = tempfile.mkdtemp()
_snake_case : Optional[int] = os.path.join(cls.tmpdir , """default_config.yml""" )
write_basic_config(save_location=cls.configPath )
_snake_case : List[str] = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def __lowerCamelCase( cls ):
"""simple docstring"""
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __lowerCamelCase( self ):
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """glue_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __lowerCamelCase( self ):
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertLess(result["""perplexity"""] , 1_00 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """clm_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __lowerCamelCase( self ):
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertLess(result["""perplexity"""] , 42 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """mlm_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __lowerCamelCase( self ):
"""simple docstring"""
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
        self.assertLess(result["""train_loss"""] , 0.5 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """ner_no_trainer""" ) ) )
@unittest.skip(reason="""Fix me @muellerzr""" )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __lowerCamelCase( self ):
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["""eval_f1"""] , 28 )
        self.assertGreaterEqual(result["""eval_exact"""] , 28 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """qa_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __lowerCamelCase( self ):
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["""eval_accuracy"""] , 0.8 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """swag_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __lowerCamelCase( self ):
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["""eval_rouge1"""] , 10 )
        self.assertGreaterEqual(result["""eval_rouge2"""] , 2 )
        self.assertGreaterEqual(result["""eval_rougeL"""] , 7 )
        self.assertGreaterEqual(result["""eval_rougeLsum"""] , 7 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """summarization_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __lowerCamelCase( self ):
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["""eval_bleu"""] , 30 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """translation_no_trainer""" ) ) )
@slow
def __lowerCamelCase( self ):
"""simple docstring"""
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["""eval_overall_accuracy"""] , 0.10 )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __lowerCamelCase( self ):
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        # The base model scores a 25%
        self.assertGreaterEqual(result["""eval_accuracy"""] , 0.6 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """step_1""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """image_classification_no_trainer""" ) ) )
| 703 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester :
"""simple docstring"""
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
"""simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config , input_ids , decoder_input_ids )
return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        """simple docstring"""
        model = TFBlenderbotSmallModel(config=config ).get_decoder()
        input_ids = inputs_dict["""input_ids"""]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["""attention_mask"""][:1, :]
        head_mask = inputs_dict["""head_mask"""]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1e-3 )
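# Note on the check above: with `use_cache=True` the decoder returns
# `past_key_values`; feeding only the new tokens plus that cache must yield
# the same logits (up to `rtol=1e-3`) as re-running the full concatenated
# sequence, which is exactly what the random-slice comparison asserts.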
def prepare_blenderbot_small_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> dict:
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
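# Shape sketch for the helper above (illustrative, using the tester defaults of
# batch_size=13, 2 encoder layers, and 4 attention heads): `attention_mask`
# comes out as (13, seq_len) with zeros on pad positions, and `head_mask` as
# (encoder_layers, encoder_attention_heads) == (2, 4).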
@require_tf
class __SCREAMING_SNAKE_CASE ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': TFBlenderbotSmallForConditionalGeneration,
            'feature-extraction': TFBlenderbotSmallModel,
            'summarization': TFBlenderbotSmallForConditionalGeneration,
            'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
            'translation': TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
def __lowerCamelCase( self ):
"""simple docstring"""
        self.model_tester = TFBlenderbotSmallModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BlenderbotSmallConfig )
def __lowerCamelCase( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCamelCase( self ):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_tokenizers
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
    src_text = [
        'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
        ' i\'m going to throw up.\nand why is that?'
    ]
    model_name = 'facebook/blenderbot_small-90M'
@cached_property
    def tokenizer( self ):
"""simple docstring"""
return BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" )
@cached_property
    def model( self ):
"""simple docstring"""
_snake_case : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def __lowerCamelCase( self ):
"""simple docstring"""
        model_inputs = self.tokenizer(self.src_text , return_tensors="""tf""" )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 519 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Dict = {
    '''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class a ( PretrainedConfig ):
    model_type = """wavlm"""
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : int=32 , __SCREAMING_SNAKE_CASE : Any=768 , __SCREAMING_SNAKE_CASE : Optional[int]=12 , __SCREAMING_SNAKE_CASE : int=12 , __SCREAMING_SNAKE_CASE : Optional[Any]=3072 , __SCREAMING_SNAKE_CASE : Union[str, Any]="gelu" , __SCREAMING_SNAKE_CASE : Any=0.1 , __SCREAMING_SNAKE_CASE : Any=0.1 , __SCREAMING_SNAKE_CASE : List[str]=0.1 , __SCREAMING_SNAKE_CASE : Any=0.0 , __SCREAMING_SNAKE_CASE : List[Any]=0.1 , __SCREAMING_SNAKE_CASE : str=0.1 , __SCREAMING_SNAKE_CASE : Any=0.02 , __SCREAMING_SNAKE_CASE : Optional[int]=1e-5 , __SCREAMING_SNAKE_CASE : Union[str, Any]="group" , __SCREAMING_SNAKE_CASE : Union[str, Any]="gelu" , __SCREAMING_SNAKE_CASE : List[str]=(512, 512, 512, 512, 512, 512, 512) , __SCREAMING_SNAKE_CASE : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , __SCREAMING_SNAKE_CASE : Union[str, Any]=(10, 3, 3, 3, 3, 2, 2) , __SCREAMING_SNAKE_CASE : Dict=False , __SCREAMING_SNAKE_CASE : int=128 , __SCREAMING_SNAKE_CASE : Optional[Any]=16 , __SCREAMING_SNAKE_CASE : Optional[Any]=320 , __SCREAMING_SNAKE_CASE : List[str]=800 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : Optional[Any]=0.05 , __SCREAMING_SNAKE_CASE : Union[str, Any]=10 , __SCREAMING_SNAKE_CASE : List[Any]=2 , __SCREAMING_SNAKE_CASE : int=0.0 , __SCREAMING_SNAKE_CASE : Any=10 , __SCREAMING_SNAKE_CASE : Optional[Any]=320 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : List[Any]=0.1 , __SCREAMING_SNAKE_CASE : Tuple=100 , __SCREAMING_SNAKE_CASE : str=256 , __SCREAMING_SNAKE_CASE : Optional[int]=256 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Dict="mean" , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : Tuple=256 , __SCREAMING_SNAKE_CASE : Dict=(512, 512, 512, 512, 1500) , __SCREAMING_SNAKE_CASE : int=(5, 3, 3, 1, 1) , __SCREAMING_SNAKE_CASE : Optional[int]=(1, 2, 3, 1, 1) , __SCREAMING_SNAKE_CASE : Dict=512 , __SCREAMING_SNAKE_CASE : Union[str, Any]=80 , __SCREAMING_SNAKE_CASE : Dict=0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : str=3 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : int=3 , __SCREAMING_SNAKE_CASE : Dict=None , **__SCREAMING_SNAKE_CASE : List[Any] , ) -> Dict:
super().__init__(**__SCREAMING_SNAKE_CASE , pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = hidden_size
lowerCamelCase_ = feat_extract_norm
lowerCamelCase_ = feat_extract_activation
lowerCamelCase_ = list(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = list(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = list(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = conv_bias
lowerCamelCase_ = num_buckets
lowerCamelCase_ = max_bucket_distance
lowerCamelCase_ = num_conv_pos_embeddings
lowerCamelCase_ = num_conv_pos_embedding_groups
lowerCamelCase_ = len(self.conv_dim )
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = hidden_dropout
lowerCamelCase_ = attention_dropout
lowerCamelCase_ = activation_dropout
lowerCamelCase_ = feat_proj_dropout
lowerCamelCase_ = final_dropout
lowerCamelCase_ = layerdrop
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_ctc_classes
lowerCamelCase_ = vocab_size
lowerCamelCase_ = do_stable_layer_norm
lowerCamelCase_ = use_weighted_layer_sum
lowerCamelCase_ = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase_ = apply_spec_augment
lowerCamelCase_ = mask_time_prob
lowerCamelCase_ = mask_time_length
lowerCamelCase_ = mask_time_min_masks
lowerCamelCase_ = mask_feature_prob
lowerCamelCase_ = mask_feature_length
# parameters for pretraining with codevector quantized representations
lowerCamelCase_ = num_codevectors_per_group
lowerCamelCase_ = num_codevector_groups
lowerCamelCase_ = contrastive_logits_temperature
lowerCamelCase_ = num_negatives
lowerCamelCase_ = codevector_dim
lowerCamelCase_ = proj_codevector_dim
lowerCamelCase_ = diversity_loss_weight
# ctc loss
lowerCamelCase_ = ctc_loss_reduction
lowerCamelCase_ = ctc_zero_infinity
# adapter
lowerCamelCase_ = add_adapter
lowerCamelCase_ = adapter_kernel_size
lowerCamelCase_ = adapter_stride
lowerCamelCase_ = num_adapter_layers
lowerCamelCase_ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowerCamelCase_ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowerCamelCase_ = list(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = list(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = list(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = xvector_output_dim
@property
    def inputs_to_logits_ratio( self : Tuple ) -> int:
return functools.reduce(operator.mul , self.conv_stride , 1 )
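# Quick check of the property above, assuming the class mirrors the usual
# `WavLMConfig`: the product of the default conv strides (5, 2, 2, 2, 2, 2, 2)
# is 5 * 2**6 == 320, i.e. one output frame per 320 input samples (20 ms of
# 16 kHz audio):
#
#   import functools, operator
#   assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320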
| 549 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class a ( unittest.TestCase ):
@require_torch
def UpperCamelCase ( self : str ) -> str:
        audio_classifier = pipeline(
            task='zero-shot-audio-classification' , model='hf-internal-testing/tiny-clap-htsat-unfused' )
        dataset = load_dataset('ashraq/esc50' )
        audio = dataset['train']['audio'][-1]['array']
        output = audio_classifier(audio , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] )
        self.assertEqual(
            nested_simplify(output ) , [{'score': 0.501, 'label': 'Sound of a dog'}, {'score': 0.499, 'label': 'Sound of vaccum cleaner'}] , )
@unittest.skip('No models are available in TF' )
def UpperCamelCase ( self : Tuple ) -> Optional[Any]:
pass
@slow
@require_torch
def UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
        audio_classifier = pipeline(
            task='zero-shot-audio-classification' , model='laion/clap-htsat-unfused' , )
        # This is an audio of a dog
        dataset = load_dataset('ashraq/esc50' )
        audio = dataset['train']['audio'][-1]['array']
        output = audio_classifier(audio , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] )
        self.assertEqual(
            nested_simplify(output ) , [
                {'score': 0.999, 'label': 'Sound of a dog'},
                {'score': 0.001, 'label': 'Sound of vaccum cleaner'},
            ] , )
        output = audio_classifier([audio] * 5 , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {'score': 0.999, 'label': 'Sound of a dog'},
                    {'score': 0.001, 'label': 'Sound of vaccum cleaner'},
                ],
            ]
            * 5 , )
        output = audio_classifier(
            [audio] * 5 , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] , batch_size=5 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {'score': 0.999, 'label': 'Sound of a dog'},
                    {'score': 0.001, 'label': 'Sound of vaccum cleaner'},
                ],
            ]
            * 5 , )
@unittest.skip('No models are available in TF' )
def UpperCamelCase ( self : Optional[int] ) -> int:
pass
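# A standalone sketch of the pipeline exercised above (model name and labels as
# in the slow test; `audio_array` is assumed to be a 1-D numpy waveform):
#
#   from transformers import pipeline
#   clf = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   preds = clf(audio_array, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
#   # -> [{"score": ..., "label": ...}, ...] sorted by descending score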
| 549 | 1 |
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    '''debug''': logging.DEBUG,
    '''info''': logging.INFO,
    '''warning''': logging.WARNING,
    '''error''': logging.ERROR,
    '''critical''': logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def _get_default_logging_level( ):
    env_level_str = os.getenv('''TRANSFORMERS_VERBOSITY''' , None )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys() ) }" )
    return _default_log_level
def _get_library_name( ):
    return __name__.split('''.''' )[0]
def _get_library_root_logger( ):
    return logging.getLogger(_get_library_name() )
def _configure_library_root_logger( ):
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler )
        library_root_logger.setLevel(_get_default_logging_level() )
        library_root_logger.propagate = False
def _reset_library_root_logger( ):
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler )
        library_root_logger.setLevel(logging.NOTSET )
        _default_handler = None
def get_log_levels_dict( ):
    return log_levels
def get_logger( _a = None ):
    if _a is None:
        _a = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(_a )
def get_verbosity( ):
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity( _a ):
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(_a )
def set_verbosity_info( ):
    return set_verbosity(INFO )
def set_verbosity_warning( ):
    return set_verbosity(WARNING )
def set_verbosity_debug( ):
    return set_verbosity(DEBUG )
def set_verbosity_error( ):
    return set_verbosity(ERROR )
def disable_default_handler( ):
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler )
def enable_default_handler( ):
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler )
def add_handler( _a ):
    _configure_library_root_logger()
    assert _a is not None
    _get_library_root_logger().addHandler(_a )
def remove_handler( _a ):
    _configure_library_root_logger()
    assert _a is not None and _a in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(_a )
def disable_propagation( ):
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False
def enable_propagation( ):
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format( ):
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter('''[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s''' )
        handler.setFormatter(formatter )
def reset_format( ):
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None )
def warning_advice( self , *args , **kwargs ):
    no_advisory_warnings = os.getenv('''TRANSFORMERS_NO_ADVISORY_WARNINGS''' , False )
    if no_advisory_warnings:
        return
    self.warning(*args , **kwargs )
logging.Logger.warning_advice = warning_advice
@functools.lru_cache(None )
def warning_once( self , *args , **kwargs ):
    self.warning(*args , **kwargs )
logging.Logger.warning_once = warning_once
class EmptyTqdm :
    def __init__( self : Any , *args , **kwargs ):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None
def __iter__( self : int ):
return iter(self._iterator )
def __getattr__( self : List[str] , lowercase_ : str ):
        def empty_fn(*args : Optional[Any] , **kwargs : Tuple ):  # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : int ):
return self
    def __exit__( self : Tuple , type_ : Union[str, Any] , value : int , traceback : Any ):
return
class _tqdm_cls :
    def __call__( self : Tuple , *args , **kwargs ):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )
    def set_lock( self : List[Any] , *args , **kwargs ):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )
    def get_lock( self : str ):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled( ):
    global _tqdm_active
    return bool(_tqdm_active )
def enable_progress_bar( ):
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()
def disable_progress_bar( ):
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
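# Typical use of this module from library code (a sketch):
#
#   logger = get_logger(__name__)
#   logger.info("loading weights")     # hidden by default (root level is WARNING)
#   set_verbosity_info()               # or export TRANSFORMERS_VERBOSITY=info
#   logger.warning_once("deprecated")  # deduplicated via functools.lru_cache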
| 714 |
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase):
@slow
def _snake_case ( self : List[Any] ):
        image_processor = AutoImageProcessor.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
        model = AutoModelForImageClassification.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
        model.to(torch_device )
        from datasets import load_dataset
        dataset = load_dataset('''nielsr/rvlcdip-demo''' )
        image = dataset['''train'''][0]['''image'''].convert('''RGB''' )
        inputs = image_processor(image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        expected_shape = torch.Size((1, 16) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [-0.41_58, -0.40_92, -0.43_47] , device=torch_device , dtype=torch.float , )
        self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1E-4 ) )
| 485 | 0 |
"""simple docstring"""
from __future__ import annotations
def two_pointer( nums: list[int] , target: int ) -> list[int]:
    """Return indices of the two values in sorted list `nums` that sum to `target`.

    Uses the two-pointer technique, so `nums` must be sorted in ascending
    order; returns an empty list when no such pair exists.
    """
    i = 0
    j = len(nums ) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 624 |
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def unet( hor ) -> None:
    if hor == 128:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 128, 256)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D')
    elif hor == 32:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
    model = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
    state_dict = model.state_dict()
    config = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 14,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 65_536,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
    hf_value_function = UNetaDModel(**config )
    print(f'''length of state dict: {len(state_dict.keys() )}''' )
    print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
    mapping = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() , f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
    with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , 'w' ) as f:
        json.dump(config , f )
def value_function( ) -> None:
    config = {
'in_channels': 14,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (32, 64, 128, 256),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 65_536,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
    model = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' )
    state_dict = model
    hf_value_function = UNetaDModel(**config )
    print(f'''length of state dict: {len(state_dict.keys() )}''' )
    print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
    mapping = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() , 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
    with open('hub/hopper-medium-v2/value_function/config.json' , 'w' ) as f:
        json.dump(config , f )
if __name__ == "__main__":
unet(3_2)
# unet(128)
value_function()
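# After this script runs, each converted checkpoint can in principle be loaded
# back with diffusers (a sketch, assuming a compatible diffusers version):
#
#   from diffusers import UNet1DModel
#   value_net = UNet1DModel.from_pretrained('hub/hopper-medium-v2/value_function')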
| 9 | 0 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_abit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def __A ( _lowercase , _lowercase , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = False , ):
'''simple docstring'''
_A = bnb_quantization_config.load_in_abit
_A = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
''' make sure you have the latest version of `bitsandbytes` installed.''' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
'''make sure you have the latest version of `bitsandbytes` installed.''' )
_A = []
# custom device map
if isinstance(_lowercase , _lowercase ) and len(device_map.keys() ) > 1:
_A = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
_A = get_keys_to_not_convert(_lowercase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_lowercase )
_A = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
_A = []
_A = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_lowercase )
# compatibility with peft
_A = load_in_abit
_A = load_in_abit
_A = get_parameter_device(_lowercase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
_A = replace_with_bnb_layers(_lowercase , _lowercase , modules_to_not_convert=_lowercase )
# convert param to the right dtype
_A = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
_A = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
_A = getattr(_lowercase , _lowercase , _lowercase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_lowercase ):
param.to(_lowercase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
'''We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
_A = replace_with_bnb_layers(
_lowercase , _lowercase , modules_to_not_convert=_lowercase )
_A = get_quantized_model_device_map(
_lowercase , _lowercase , _lowercase , max_memory=_lowercase , no_split_module_classes=_lowercase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
_A = True
_A = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
load_checkpoint_in_model(
_lowercase , _lowercase , _lowercase , dtype=bnb_quantization_config.torch_dtype , offload_folder=_lowercase , offload_state_dict=_lowercase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(_lowercase , device_map=_lowercase , offload_dir=_lowercase )
def __A ( _lowercase , _lowercase , _lowercase=None , _lowercase=None , _lowercase=None ):
'''simple docstring'''
if device_map is None:
if torch.cuda.is_available():
_A = {'''''': torch.cuda.current_device()}
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
if isinstance(_lowercase , _lowercase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
_A = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
_A = {}
_A = special_dtypes
_A = no_split_module_classes
_A = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
_A = get_balanced_memory(
_lowercase , low_zero=(device_map == '''balanced_low_0''') , max_memory=_lowercase , **_lowercase , )
_A = max_memory
_A = infer_auto_device_map(_lowercase , **_lowercase )
if isinstance(_lowercase , _lowercase ):
# check if don't have any quantized module on the cpu
_A = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
_A = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
                    https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
else:
logger.info(
'''Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
def __A ( _lowercase , _lowercase , _lowercase=None , _lowercase=None ):
'''simple docstring'''
if modules_to_not_convert is None:
_A = []
_A ,_A = _replace_with_bnb_layers(
_lowercase , _lowercase , _lowercase , _lowercase )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def __A ( _lowercase , _lowercase , _lowercase=None , _lowercase=None , ):
'''simple docstring'''
_A = False
for name, module in model.named_children():
if current_key_name is None:
_A = []
current_key_name.append(_lowercase )
if isinstance(_lowercase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
_A = '''.'''.join(_lowercase )
_A = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
_A = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
_A = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_lowercase , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
_A = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
_A = module.weight.data
if module.bias is not None:
_A = module.bias.data
bnb_module.requires_grad_(_lowercase )
setattr(_lowercase , _lowercase , _lowercase )
_A = True
if len(list(module.children() ) ) > 0:
_A ,_A = _replace_with_bnb_layers(
_lowercase , _lowercase , _lowercase , _lowercase )
_A = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __A ( _lowercase ):
'''simple docstring'''
with init_empty_weights():
_A = deepcopy(_lowercase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
_A = find_tied_parameters(_lowercase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowercase , _lowercase ):
_A = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
_A = sum(_lowercase , [] )
_A = len(_lowercase ) > 0
# Check if it is a base model
_A = False
if hasattr(_lowercase , '''base_model_prefix''' ):
_A = not hasattr(_lowercase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
_A = list(model.named_children() )
_A = [list_modules[-1][0]]
# add last module together with tied weights
_A = set(_lowercase ) - set(_lowercase )
_A = list(set(_lowercase ) ) + list(_lowercase )
# remove ".weight" from the keys
_A = ['''.weight''', '''.bias''']
_A = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
_A = name.replace(_lowercase , '''''' )
filtered_module_names.append(_lowercase )
return filtered_module_names
def __A ( _lowercase ):
'''simple docstring'''
for m in model.modules():
if isinstance(_lowercase , bnb.nn.Linearabit ):
return True
return False
def __A ( _lowercase ):
'''simple docstring'''
return next(parameter.parameters() ).device
def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
if fpaa_statistics is None:
set_module_tensor_to_device(_lowercase , _lowercase , 0 , dtype=_lowercase , value=_lowercase )
_A = param_name
_A = model
if "." in tensor_name:
_A = tensor_name.split('''.''' )
for split in splits[:-1]:
_A = getattr(_lowercase , _lowercase )
if new_module is None:
raise ValueError(f"""{module} has no attribute {split}.""" )
_A = new_module
_A = splits[-1]
# offload weights
_A = False
offload_weight(module._parameters[tensor_name] , _lowercase , _lowercase , index=_lowercase )
if hasattr(module._parameters[tensor_name] , '''SCB''' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , _lowercase , index=_lowercase , )
else:
offload_weight(_lowercase , _lowercase , _lowercase , index=_lowercase )
offload_weight(_lowercase , param_name.replace('''weight''' , '''SCB''' ) , _lowercase , index=_lowercase )
set_module_tensor_to_device(_lowercase , _lowercase , '''meta''' , dtype=_lowercase , value=torch.empty(*param.size() ) )
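# End-to-end usage sketch for this module (names follow `accelerate.utils`;
# treat exact signatures as version-dependent, and `MyModel` as hypothetical):
#
#   from accelerate import init_empty_weights
#   from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
#   with init_empty_weights():
#       empty_model = MyModel(model_config)
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#   quantized = load_and_quantize_model(
#       empty_model, bnb_config, weights_location="path/to/checkpoint"
#   )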
| 62 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
"""simple docstring"""
A_ = BlenderbotSmallTokenizer
A_ = False
def __A ( self: List[str] ) -> int:
super().setUp()
        vocab = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
def __A ( self: str , **__A: Optional[Any] ) -> Dict:
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__A )
def __A ( self: str , __A: List[str] ) -> int:
        input_text = '''adapt act apte'''
        output_text = '''adapt act apte'''
return input_text, output_text
def __A ( self: Union[str, Any] ) -> Any:
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = '''adapt act apte'''
        bpe_tokens = ['''adapt''', '''act''', '''ap@@''', '''te''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def __A ( self: Any ) -> List[str]:
        tok = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
        assert tok('''sam''' ).input_ids == [13_84]
        src_text = '''I am a small frog.'''
        encoded = tok([src_text] , padding=False , truncation=True )['''input_ids''']
        decoded = tok.batch_decode(encoded , skip_special_tokens=True , clean_up_tokenization_spaces=True )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def __A ( self: Any ) -> int:
        tok = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
        src_text = '''I am a small frog .'''
        src_text_dot = '''.'''
        encoded = tok(src_text )['''input_ids''']
        encoded_dot = tok(src_text_dot )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
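# How the toy vocab in `setUp` tokenizes "apte": BPE starts from the characters
# a p t e, applies the learned merges "a p" -> "ap" and "t e</w>" -> "te", and
# marks the word-internal piece with the `@@` continuation suffix, yielding
# ["ap@@", "te"], exactly the expected tokens asserted in the full-tokenizer test.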
| 62 | 1 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester :
    def __init__( self : Optional[Any] , parent , batch_size=1_3 , image_size=3_0 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=1_0 , initializer_range=0.02 , num_labels=3 , mask_ratio=0.6 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
    def prepare_config_and_inputs( self : Optional[Any] ):
'''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
return config, pixel_values, labels
    def get_config( self : str ):
'''simple docstring'''
return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __a ( self : Dict , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = TFViTMAEModel(config=snake_case__ )
UpperCAmelCase__ : Optional[Any] = model(snake_case__ , training=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = TFViTMAEForPreTraining(snake_case__ )
UpperCAmelCase__ : str = model(snake_case__ , training=snake_case__ )
# expected sequence length = num_patches
UpperCAmelCase__ : Optional[int] = (self.image_size // self.patch_size) ** 2
UpperCAmelCase__ : Any = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCAmelCase__ : List[Any] = 1
UpperCAmelCase__ : Any = TFViTMAEForPreTraining(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : str = model(snake_case__ , training=snake_case__ )
UpperCAmelCase__ : List[str] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.prepare_config_and_inputs()
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : str = config_and_inputs
UpperCAmelCase__ : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def test_keyword_and_dict_args(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)
    def test_numpy_arrays_inputs(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)

            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)
    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
    def test_keras_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }

        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})

        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)
    @slow
    def test_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)

            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)

                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    def test_save_load_config(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_inputs, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(model_inputs)  # Build model
            new_model.set_weights(model.get_weights())
            new_outputs = new_model(model_inputs, noise=noise)

            self.assert_outputs_same(new_outputs, outputs)
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load"
        " to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
| 438 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while node:
            yield node.data
            node = node.next
            if node == self.head:
                break
def __len__( self : Tuple ):
'''simple docstring'''
return sum(1 for _ in self )
    def __repr__(self):
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node
    def delete_front(self):
        return self.delete_nth(0)

    def delete_tail(self):
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0
def test_circular_linked_list() -> None:
    """Test cases for the CircularLinkedList class."""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 438 | 1 |
def kth_permutation(k: int, n: int) -> list[int]:
    """Return the k'th lexicographic permutation of 0, 1, 2, ..., n - 1."""
    # Factorials from 1! to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])
    return permutation
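# A hand-checked usage sketch (values derived from the algorithm above, not from
# the original file): kth_permutation(0, 5) -> [0, 1, 2, 3, 4] (the first
# permutation), and kth_permutation(10, 4) -> [1, 3, 0, 2], since
# 10 = 1*3! + 2*2! + 0*1! in the factorial number system.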
if __name__ == "__main__":
import doctest
doctest.testmod()
| 284 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings, avoiding whitespace
    and control characters that would confuse the BPE code.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
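# Sanity-check sketch: printable bytes map to themselves (byte 65 -> "A"), while a
# control byte such as 0 is remapped to the first unused codepoint, chr(256) = "Ā",
# so every byte gets a visible, reversible character for BPE to operate on.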
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
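# e.g. get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}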
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
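    # A hand-traced sketch (hypothetical ranks, not from the vocabulary files): if
    # self.bpe_ranks ranked ("l", "o") first and ("lo", "w") second, bpe("low")
    # would merge "l o w" -> "lo w" -> "low" and cache the space-joined result.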
    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
| 284 | 1 |
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
| 216 | from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count how many ways each total can be rolled with `dice_number` dice."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies
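# Sanity check, computed by hand: with a single 4-sided die,
# total_frequency_distribution(4, 1) == [0, 1, 1, 1, 1] -- one way each to roll 1..4.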
def solution() -> float:
    """
    Returns the probability that Peter (nine 4-sided dice) beats Colin
    (six 6-sided dice), rounded to seven decimal places.
    """
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9
    )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6
    )

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
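# For reference (from memory, not from this file): this is Project Euler 205, whose
# accepted answer is 0.5731441; useful as a cross-check when modifying the code.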
if __name__ == "__main__":
print(f'''{solution() = }''')
| 216 | 1 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class __A ( _UpperCamelCase ):
"""simple docstring"""
def A__ ( self , __snake_case):
with open(__a , encoding='utf-8') as input_file:
_UpperCamelCase : Dict = re.compile(r'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)')
_UpperCamelCase : Dict = input_file.read()
_UpperCamelCase : List[str] = regexp.search(__a)
return match
def A__ ( self , __snake_case):
with open(__a , encoding='utf-8') as input_file:
_UpperCamelCase : str = re.compile(r'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()' , re.DOTALL)
_UpperCamelCase : Optional[int] = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
_UpperCamelCase : int = regexp.finditer(__a)
_UpperCamelCase : Tuple = [match for match in matches if match is not None and match.group(1) is not None]
return matches[0] if matches else None
def A__ ( self):
_UpperCamelCase : Dict = Path('./datasets')
_UpperCamelCase : List[str] = list(dataset_paths.absolute().glob('**/*.py'))
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(__a)):
raise AssertionError(f'''open(...) must use utf-8 encoding in {dataset}''')
def A__ ( self):
_UpperCamelCase : List[str] = Path('./datasets')
_UpperCamelCase : str = list(dataset_paths.absolute().glob('**/*.py'))
for dataset in dataset_files:
if self._no_print_statements(str(__a)):
raise AssertionError(f'''print statement found in {dataset}. Use datasets.logger/logging instead.''')
| 707 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 648 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 452 |
'''simple docstring'''
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """Return the activation module matching the given name."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
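# A minimal usage sketch (the `get_activation` name above is a reconstruction of
# the mangled original, so treat it as an assumption):
# act = get_activation("silu")   # -> nn.SiLU()
# y = act(torch.randn(2, 3))     # apply like any nn.Module; requires `import torch`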
| 672 | 0 |
"""simple docstring"""
def selection_sort(collection: list) -> list:
    """Pure Python implementation of the selection sort algorithm (in place)."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
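# e.g. selection_sort([0, 5, 3, 2, 2]) -> [0, 2, 2, 3, 5]; O(n^2) comparisons but
# at most n - 1 swaps, which is the algorithm's main selling point.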
if __name__ == "__main__":
lowerCamelCase : int = input("""Enter numbers separated by a comma:\n""").strip()
lowerCamelCase : Tuple = [int(item) for item in user_input.split(""",""")]
print(selection_sort(unsorted))
| 168 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """
    Scrape Amazon search results for the given product name and collect them
    in a pandas DataFrame.
    """
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        product_price = " "
        product_mrp = " "
    data_frame.index += 1
    return data_frame
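# A minimal usage sketch (column names come from the DataFrame constructor above):
# df = get_amazon_product_data("headphones")
# print(df[["Product Title", "Current Price of the product"]].head())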
if __name__ == "__main__":
lowerCamelCase : str = """headphones"""
get_amazon_product_data(product).to_csv(F'''Amazon Product Data for {product}.csv''')
| 168 | 1 |
import math


def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)

    return exponent == int(exponent)
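# Why the log2 test works (a derivation, not text from the original file): when
# positive_integer = k * (k + 1), sqrt(4k(k + 1) + 1) = 2k + 1, so the argument of
# log2 simplifies to k + 1; the partition is perfect exactly when k + 1 is a power of two.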
def solution(max_proportion: float = 1 / 12345) -> int:
    """
    Find the smallest partition value for which the proportion of perfect
    partitions drops below `max_proportion`.
    """
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(F'''{solution() = }''')
| 635 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a pair of `DataLoader`s for the GLUE MRPC dataset,
    using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
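# How LocalSGD relates to gradient accumulation (a sketch of the idea, not text
# from the original script): gradient accumulation delays each optimizer step for
# `gradient_accumulation_steps` minibatches, while LocalSGD lets every worker step
# independently and only averages model *parameters* across workers every
# `local_sgd_steps` steps, trading slight staleness for less communication.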
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 273 | 0 |
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True
def _get_default_logging_level():
    """
    If TRANSFORMERS_VERBOSITY env var is set to one of the valid choices return that as the new default level. If it
    is not - fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """
    Return a logger with the specified name, falling back to the library root logger.
    """
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    """Disable the default handler of the library's root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    """Enable the default handler of the library's root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    """Adds a handler to the library's root logger."""
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    """Removes the given handler from the library's root logger."""
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    """
    Identical to `logger.warning()`, but skipped when the env var
    TRANSFORMERS_NO_ADVISORY_WARNINGS=1 is set.
    """
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Identical to `logger.warning()`, but emits each distinct message only once."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bar."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bar():
    """Disable tqdm progress bar."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
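# A minimal usage sketch (the module path is assumed, since this file's location
# is not shown in the source):
# from transformers.utils import logging
# logging.set_verbosity_info()
# logger = logging.get_logger(__name__)
# logger.warning_once("printed once per process, thanks to the lru_cache above")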
| 703 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )
@is_staging_test
class snake_case__ ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ ( cls : str ):
snake_case__ : Tuple = TOKEN
HfFolder.save_token(_lowerCamelCase )
@classmethod
def UpperCAmelCase__ ( cls : int ):
try:
delete_repo(token=cls._token , repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
def UpperCAmelCase__ ( self : int ):
snake_case__ : Dict = WavaVecaFeatureExtractor.from_pretrained(_lowerCamelCase )
feature_extractor.push_to_hub('test-feature-extractor' , use_auth_token=self._token )
snake_case__ : Any = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_lowerCamelCase , repo_id='test-feature-extractor' , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
snake_case__ : Any = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
def UpperCAmelCase__ ( self : List[str] ):
snake_case__ : str = WavaVecaFeatureExtractor.from_pretrained(_lowerCamelCase )
feature_extractor.push_to_hub('valid_org/test-feature-extractor' , use_auth_token=self._token )
snake_case__ : Dict = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_lowerCamelCase , repo_id='valid_org/test-feature-extractor-org' , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
snake_case__ : str = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
def UpperCAmelCase__ ( self : List[Any] ):
CustomFeatureExtractor.register_for_auto_class()
snake_case__ : Optional[int] = CustomFeatureExtractor.from_pretrained(_lowerCamelCase )
feature_extractor.push_to_hub('test-dynamic-feature-extractor' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'} , )
snake_case__ : int = AutoFeatureExtractor.from_pretrained(
F'''{USER}/test-dynamic-feature-extractor''' , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , 'CustomFeatureExtractor' )
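        # Note on the dynamic-module test above: `register_for_auto_class()` writes an
        # `auto_map` entry into the pushed config, which is how `AutoFeatureExtractor`
        # can rebuild the custom class -- but only when the caller opts in with
        # `trust_remote_code=True`, since the class source is fetched and executed
        # from the Hub repository.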
| 303 | 0 |
'''simple docstring'''
import math
from datetime import datetime, timedelta
def a_ ( __UpperCAmelCase ) -> datetime:
"""simple docstring"""
snake_case: List[str] =year % 19
snake_case: int =year % 4
snake_case: int =year % 7
snake_case: Union[str, Any] =math.floor(year / 1_00 )
snake_case: List[Any] =math.floor((13 + 8 * leap_day_inhibits) / 25 )
snake_case: Optional[Any] =leap_day_inhibits / 4
snake_case: int =(
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
snake_case: List[Any] =(4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
snake_case: Optional[int] =(19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
snake_case: Tuple =(
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(__UpperCAmelCase , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(__UpperCAmelCase , 4 , 18 )
else:
return datetime(__UpperCAmelCase , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1_994, 2_000, 2_010, 2_021, 2_023):
a = 'will be' if year > datetime.now().year else 'was'
print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
| 350 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
a = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ['OwlViTFeatureExtractor']
a = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
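# --- Illustrative addition (not the actual transformers implementation): the
# `_LazyModule` used above defers importing heavy submodules until an attribute
# is first accessed. A minimal self-contained sketch of that idea:
import importlib
import types

class _DemoLazyModule(types.ModuleType):
    """Resolve attributes by importing their submodule on first access."""

    def __init__(self, name: str, attr_to_submodule: dict):
        super().__init__(name)
        self._attr_to_submodule = attr_to_submodule

    def __getattr__(self, attr: str):
        try:
            submodule = self._attr_to_submodule[attr]
        except KeyError:
            raise AttributeError(attr) from None
        value = getattr(importlib.import_module(submodule), attr)
        setattr(self, attr, value)  # cache so later lookups skip the import
        return value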
| 350 | 1 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __magic_name__ ( unittest.TestCase ):
@property
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
torch.manual_seed(0)
_UpperCAmelCase : List[Any] =UNetaDModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def lowerCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int =self.dummy_uncond_unet
_UpperCAmelCase : Any =KarrasVeScheduler()
_UpperCAmelCase : str =KarrasVePipeline(unet=__a , scheduler=__a)
pipe.to(__a)
pipe.set_progress_bar_config(disable=__a)
_UpperCAmelCase : Optional[int] =torch.manual_seed(0)
_UpperCAmelCase : Tuple =pipe(num_inference_steps=2 , generator=__a , output_type='numpy').images
_UpperCAmelCase : int =torch.manual_seed(0)
_UpperCAmelCase : Any =pipe(num_inference_steps=2 , generator=__a , output_type='numpy' , return_dict=__a)[0]
_UpperCAmelCase : Union[str, Any] =image[0, -3:, -3:, -1]
_UpperCAmelCase : Optional[Any] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_UpperCAmelCase : Union[str, Any] =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch
class __magic_name__ ( unittest.TestCase ):
def lowerCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : Any ='google/ncsnpp-celebahq-256'
_UpperCAmelCase : List[str] =UNetaDModel.from_pretrained(__a)
_UpperCAmelCase : str =KarrasVeScheduler()
_UpperCAmelCase : List[str] =KarrasVePipeline(unet=__a , scheduler=__a)
pipe.to(__a)
pipe.set_progress_bar_config(disable=__a)
_UpperCAmelCase : List[str] =torch.manual_seed(0)
_UpperCAmelCase : List[str] =pipe(num_inference_steps=2_0 , generator=__a , output_type='numpy').images
_UpperCAmelCase : Optional[int] =image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
_UpperCAmelCase : List[str] =np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
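        # Hedged usage sketch of the pipeline under test (sizes illustrative, and
        # `UNet2DModel` is the assumed public name behind the obfuscated import):
        #
        #     unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
        #     pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
        #     image = pipe(num_inference_steps=2, generator=torch.manual_seed(0),
        #                  output_type="numpy").images[0]   # float array, H x W x 3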
| 712 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __magic_name__ ( unittest.TestCase ):
def __init__( self , snake_case , snake_case=7 , snake_case=3 , snake_case=1_8 , snake_case=3_0 , snake_case=4_0_0 , snake_case=True , snake_case=None , snake_case=True , ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] =size if size is not None else {'height': 1_8, 'width': 1_8}
_UpperCAmelCase : Optional[Any] =parent
_UpperCAmelCase : Any =batch_size
_UpperCAmelCase : Union[str, Any] =num_channels
_UpperCAmelCase : List[str] =image_size
_UpperCAmelCase : List[Any] =min_resolution
_UpperCAmelCase : Optional[Any] =max_resolution
_UpperCAmelCase : List[Any] =do_resize
_UpperCAmelCase : Union[str, Any] =size
_UpperCAmelCase : List[Any] =apply_ocr
def lowerCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __magic_name__ ( lowerCAmelCase ,unittest.TestCase ):
UpperCAmelCase =LayoutLMvaImageProcessor if is_pytesseract_available() else None
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : int =LayoutLMvaImageProcessingTester(self)
@property
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCAmelCase : List[Any] =self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(snake_case , 'do_resize'))
self.assertTrue(hasattr(snake_case , 'size'))
self.assertTrue(hasattr(snake_case , 'apply_ocr'))
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : int =self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'height': 1_8, 'width': 1_8})
_UpperCAmelCase : Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2)
self.assertEqual(image_processor.size , {'height': 4_2, 'width': 4_2})
def lowerCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase ( self) -> int:
'''simple docstring'''
# Initialize image_processing
_UpperCAmelCase : Optional[int] =self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_UpperCAmelCase : Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case)
for image in image_inputs:
self.assertIsInstance(snake_case , Image.Image)
# Test not batched input
_UpperCAmelCase : Union[str, Any] =image_processing(image_inputs[0] , return_tensors='pt')
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , snake_case)
self.assertIsInstance(encoding.boxes , snake_case)
# Test batched
_UpperCAmelCase : str =image_processing(snake_case , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def lowerCAmelCase ( self) -> Any:
'''simple docstring'''
# Initialize image_processing
_UpperCAmelCase : Union[str, Any] =self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_UpperCAmelCase : Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , numpify=snake_case)
for image in image_inputs:
self.assertIsInstance(snake_case , np.ndarray)
# Test not batched input
_UpperCAmelCase : List[str] =image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase : str =image_processing(snake_case , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
# Initialize image_processing
_UpperCAmelCase : Optional[int] =self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_UpperCAmelCase : Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , torchify=snake_case)
for image in image_inputs:
self.assertIsInstance(snake_case , torch.Tensor)
# Test not batched input
_UpperCAmelCase : Optional[Any] =image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase : List[Any] =image_processing(snake_case , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
# with apply_OCR = True
_UpperCAmelCase : str =LayoutLMvaImageProcessor()
from datasets import load_dataset
_UpperCAmelCase : Tuple =load_dataset('hf-internal-testing/fixtures_docvqa' , split='test')
_UpperCAmelCase : Tuple =Image.open(ds[0]['file']).convert('RGB')
_UpperCAmelCase : Any =image_processing(snake_case , return_tensors='pt')
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4))
self.assertEqual(len(encoding.words) , len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_UpperCAmelCase : Dict =[['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
_UpperCAmelCase : Optional[int] =[[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 
5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , snake_case)
self.assertListEqual(encoding.boxes , snake_case)
# with apply_OCR = False
_UpperCAmelCase : Dict =LayoutLMvaImageProcessor(apply_ocr=snake_case)
_UpperCAmelCase : Optional[int] =image_processing(snake_case , return_tensors='pt')
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4))
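        # Note on the contrast above: with `apply_ocr=True` the processor runs
        # Tesseract and returns recognised `words` plus `boxes` (normalised to a
        # 0-1000 coordinate grid, as the fmt:off fixtures show) alongside the pixel
        # values; with `apply_ocr=False` it returns pixel values only, so callers
        # must supply their own words and boxes downstream.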
| 331 | 0 |
"""simple docstring"""
from itertools import product
def __magic_name__ ( UpperCamelCase : int , UpperCamelCase : int ) -> list[int]:
a__ = sides_number
a__ = max_face_number * dice_number
a__ = [0] * (max_total + 1)
a__ = 1
a__ = range(UpperCamelCase , max_face_number + 1 )
for dice_numbers in product(UpperCamelCase , repeat=UpperCamelCase ):
a__ = sum(UpperCamelCase )
totals_frequencies[total] += 1
return totals_frequencies
def __magic_name__ ( ) -> float:
a__ = total_frequency_distribution(
sides_number=4 , dice_number=9 )
a__ = total_frequency_distribution(
sides_number=6 , dice_number=6 )
a__ = 0
a__ = 9
a__ = 4 * 9
a__ = 6
for peter_total in range(UpperCamelCase , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
a__ = (4**9) * (6**6)
a__ = peter_wins_count / total_games_number
a__ = round(UpperCamelCase , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
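    # For scale: the denominator is 4**9 * 6**6 = 262_144 * 46_656
    # = 12_230_590_464 equally likely games, and the widely reported answer to
    # this Project Euler problem (#205) is 0.5731441 -- a useful cross-check
    # against the value printed above.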
| 273 |
"""simple docstring"""
import math
class lowercase:
def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
        da = 0.0
        db = 0.0
        for i in range(len(__SCREAMING_SNAKE_CASE ) ):
            da += math.pow((sample[i] - weights[0][i]) , 2 )
            db += math.pow((sample[i] - weights[1][i]) , 2 )
        return 0 if da > db else 1
def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> list[list[int | float]]:
"""simple docstring"""
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def __magic_name__ ( ) -> None:
# Training Examples ( m, n )
a__ = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
a__ = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
a__ = SelfOrganizingMap()
a__ = 3
a__ = 0.5
for _ in range(UpperCamelCase ):
for j in range(len(UpperCamelCase ) ):
# training sample
a__ = training_samples[j]
# Compute the winning vector
a__ = self_organizing_map.get_winner(UpperCamelCase , UpperCamelCase )
# Update the winning vector
a__ = self_organizing_map.update(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
# classify test sample
a__ = [0, 0, 0, 1]
a__ = self_organizing_map.get_winner(UpperCamelCase , UpperCamelCase )
# results
print(f'Clusters that the test sample belongs to : {winner}' )
print(f'Weights that have been trained : {weights}' )
# running the main() function
if __name__ == "__main__":
main()
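# The update implemented above is the classic competitive-learning step: only
# the winning prototype w_j moves toward the sample x,
#
#     w_j <- w_j + alpha * (x - w_j)
#
# so with alpha = 0.5 each pass halves the winner's per-coordinate distance to
# the sample; a smaller alpha gives slower but more stable convergence.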
| 273 | 1 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __magic_name__ ( unittest.TestCase):
def UpperCAmelCase__ ( self : str ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = '''ylacombe/bark-small'''
UpperCamelCase__ : Dict = tempfile.mkdtemp()
UpperCamelCase__ : Dict = '''en_speaker_1'''
UpperCamelCase__ : Optional[int] = '''This is a test string'''
UpperCamelCase__ : Union[str, Any] = '''speaker_embeddings_path.json'''
UpperCamelCase__ : str = '''speaker_embeddings'''
def UpperCAmelCase__ ( self : Union[str, Any] , **lowerCamelCase__ : Dict ) -> Optional[Any]:
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCamelCase__ )
def UpperCAmelCase__ ( self : str ) -> str:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ : List[str] = self.get_tokenizer()
UpperCamelCase__ : str = BarkProcessor(tokenizer=lowerCamelCase__ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ : Optional[Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ : List[str] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
UpperCamelCase__ : Union[str, Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
UpperCamelCase__ : Union[str, Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
UpperCamelCase__ : int = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
UpperCamelCase__ : str = 35
UpperCamelCase__ : int = 2
UpperCamelCase__ : List[Any] = 8
UpperCamelCase__ : str = {
'''semantic_prompt''': np.ones(lowerCamelCase__ ),
'''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
'''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
UpperCamelCase__ : int = processor(text=self.input_string , voice_preset=lowerCamelCase__ )
UpperCamelCase__ : Dict = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase__ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
UpperCamelCase__ : Tuple = os.path.join(self.tmpdirname , '''file.npz''' )
np.savez(lowerCamelCase__ , **lowerCamelCase__ )
UpperCamelCase__ : Dict = processor(text=self.input_string , voice_preset=lowerCamelCase__ )
UpperCamelCase__ : Optional[Any] = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase__ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
UpperCamelCase__ : Dict = processor(text=self.input_string , voice_preset=self.voice_preset )
def UpperCAmelCase__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = self.get_tokenizer()
UpperCamelCase__ : Tuple = BarkProcessor(tokenizer=lowerCamelCase__ )
UpperCamelCase__ : List[str] = processor(text=self.input_string )
UpperCamelCase__ : str = tokenizer(
self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
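        # Pattern worth noting from the tests above: a voice preset is just a dict
        # of three numpy arrays ("semantic_prompt", "coarse_prompt", "fine_prompt"),
        # so `np.savez(path, **preset)` followed by loading the resulting .npz file
        # restores exactly those keys -- which is why equality is asserted key by key.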
| 106 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __magic_name__ ( __lowerCAmelCase):
A: Optional[Any] = ["image_processor", "tokenizer"]
A: Optional[Any] = "LayoutLMv2ImageProcessor"
A: List[str] = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__( self : Dict , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : List[str]=None , **lowerCamelCase__ : Any ) -> Optional[Any]:
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowerCamelCase__ , )
UpperCamelCase__ : str = kwargs.pop('''feature_extractor''' )
UpperCamelCase__ : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
def __call__( self : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCamelCase__ : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , lowerCamelCase__ : Union[List[List[int]], List[List[List[int]]]] = None , lowerCamelCase__ : Optional[Union[List[int], List[List[int]]]] = None , lowerCamelCase__ : bool = True , lowerCamelCase__ : Union[bool, str, PaddingStrategy] = False , lowerCamelCase__ : Union[bool, str, TruncationStrategy] = None , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : int = 0 , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , **lowerCamelCase__ : Any , ) -> BatchEncoding:
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
UpperCamelCase__ : Optional[Any] = self.image_processor(images=lowerCamelCase__ , return_tensors=lowerCamelCase__ )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
UpperCamelCase__ : Optional[int] = [text] # add batch dimension (as the image processor always adds a batch dimension)
UpperCamelCase__ : Optional[int] = features['''words''']
UpperCamelCase__ : Optional[int] = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
# add pixel values
UpperCamelCase__ : Optional[Any] = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
UpperCamelCase__ : Union[str, Any] = self.get_overflowing_images(lowerCamelCase__ , encoded_inputs['''overflow_to_sample_mapping'''] )
UpperCamelCase__ : Tuple = images
return encoded_inputs
def UpperCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] ) -> str:
'''simple docstring'''
UpperCamelCase__ : int = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
F" {len(lowerCamelCase__ )} and {len(lowerCamelCase__ )}" )
return images_with_overflow
def UpperCAmelCase__ ( self : int , *lowerCamelCase__ : Dict , **lowerCamelCase__ : str ) -> str:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
def UpperCAmelCase__ ( self : List[str] , *lowerCamelCase__ : int , **lowerCamelCase__ : str ) -> Tuple:
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
@property
def UpperCAmelCase__ ( self : int ) -> str:
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowerCamelCase__ , )
return self.image_processor_class
@property
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowerCamelCase__ , )
return self.image_processor
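# --- Illustrative addition: `get_overflowing_images` above mirrors the
# tokenizer's `overflow_to_sample_mapping` -- when one page tokenises into N
# overflowing windows, its image must be repeated N times so that text window i
# and image i stay aligned in the batch. The bookkeeping in miniature:
_pages = ["page0", "page1"]        # stand-ins for pixel tensors
_mapping = [0, 0, 1]               # page 0 produced two text windows
_expanded = [_pages[idx] for idx in _mapping]
assert _expanded == ["page0", "page0", "page1"]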
| 106 | 1 |
"""simple docstring"""
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class _snake_case :
'''simple docstring'''
def __init__( self : List[Any] , snake_case : int , snake_case : int , snake_case : int ):
        if dst_width <= 0 or dst_height <= 0:
            raise ValueError('''Destination width/height should be > 0''' )
UpperCAmelCase_ :List[Any] = img
UpperCAmelCase_ :Any = img.shape[1]
UpperCAmelCase_ :str = img.shape[0]
UpperCAmelCase_ :Dict = dst_width
UpperCAmelCase_ :List[Any] = dst_height
UpperCAmelCase_ :Optional[Any] = self.src_w / self.dst_w
UpperCAmelCase_ :Dict = self.src_h / self.dst_h
UpperCAmelCase_ :Dict = (
np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 255
)
def snake_case_ ( self : Dict ):
for i in range(self.dst_h ):
for j in range(self.dst_w ):
UpperCAmelCase_ :List[Any] = self.img[self.get_y(snake_case )][self.get_x(snake_case )]
def snake_case_ ( self : Tuple , snake_case : int ):
return int(self.ratio_x * x )
def snake_case_ ( self : int , snake_case : int ):
return int(self.ratio_y * y )
if __name__ == "__main__":
__lowerCamelCase , __lowerCamelCase = 8_00, 6_00
__lowerCamelCase = imread("image_data/lena.jpg", 1)
__lowerCamelCase = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
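# --- Illustrative addition (not in the original): the class above maps each
# destination pixel (i, j) to source pixel (int(i * src_h / dst_h),
# int(j * src_w / dst_w)). The same nearest-neighbour rule vectorised with
# numpy, as a hedged sketch:
import numpy as np

def _nn_resize(img: np.ndarray, dst_w: int, dst_h: int) -> np.ndarray:
    """Nearest-neighbour resize of an (H, W[, C]) array via fancy indexing."""
    src_h, src_w = img.shape[:2]
    ys = np.arange(dst_h) * src_h // dst_h  # floor(i * src_h / dst_h)
    xs = np.arange(dst_w) * src_w // dst_w  # floor(j * src_w / dst_w)
    return img[ys[:, None], xs[None, :]]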
| 608 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase_ : int = {
"""configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""],
"""tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ : Any = [
"""TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AdaptiveEmbedding""",
"""TransfoXLForSequenceClassification""",
"""TransfoXLLMHeadModel""",
"""TransfoXLModel""",
"""TransfoXLPreTrainedModel""",
"""load_tf_weights_in_transfo_xl""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ : Tuple = [
"""TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAdaptiveEmbedding""",
"""TFTransfoXLForSequenceClassification""",
"""TFTransfoXLLMHeadModel""",
"""TFTransfoXLMainLayer""",
"""TFTransfoXLModel""",
"""TFTransfoXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
UpperCamelCase_ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 461 | 0 |
from __future__ import annotations
from cmath import sqrt
def lowerCamelCase__ ( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int ):
"""simple docstring"""
if a == 0:
raise ValueError("Coefficient 'a' must not be zero." )
lowerCAmelCase_ = b * b - 4 * a * c
lowerCAmelCase_ = (-b + sqrt(__lowerCAmelCase )) / (2 * a)
lowerCAmelCase_ = (-b - sqrt(__lowerCAmelCase )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def lowerCamelCase__ ( ):
"""simple docstring"""
lowerCAmelCase_ , lowerCAmelCase_ = quadratic_roots(a=5 , b=6 , c=1 )
print(F"""The solutions are: {solutiona} and {solutiona}""" )
if __name__ == "__main__":
main()
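    # Worked check for the demo above: 5x^2 + 6x + 1 factors as (5x + 1)(x + 1),
    # so `quadratic_roots(a=5, b=6, c=1)` should yield -0.2 and -1.0. Complex
    # coefficients fall out of the same formula because the discriminant goes
    # through `cmath.sqrt`.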
| 279 |
import math
class _lowerCAmelCase :
def __init__( self , _UpperCamelCase=0 ) -> Tuple: # a graph with Node 0,1,...,N-1
lowerCAmelCase_ = n
lowerCAmelCase_ = [
[math.inf for j in range(0 , _UpperCamelCase )] for i in range(0 , _UpperCamelCase )
] # adjacency matrix for weight
lowerCAmelCase_ = [
[math.inf for j in range(0 , _UpperCamelCase )] for i in range(0 , _UpperCamelCase )
] # dp[i][j] stores minimum distance from i to j
def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]:
lowerCAmelCase_ = w
def __a ( self ) -> List[str]:
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
lowerCAmelCase_ = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def __a ( self , _UpperCamelCase , _UpperCamelCase ) -> Dict:
return self.dp[u][v]
if __name__ == "__main__":
_A = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
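    # Floyd-Warshall runs in O(n^3) time and O(n^2) space. For the edges above,
    # the expected results (stated as a hedged cross-check, since show_min only
    # returns dp[u][v]) are 1 -> 3 -> 4 with cost 5 + 6 = 11, and
    # 0 -> 2 -> 3 with cost 9 + 7 = 16.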
| 279 | 1 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCamelCase : List[str] = logging.get_logger(__name__)
class a ( a__ ):
snake_case__ = ['''input_features''']
def __init__( self , _snake_case=80 , _snake_case=1_60_00 , _snake_case=1_60 , _snake_case=30 , _snake_case=4_00 , _snake_case=0.0 , _snake_case=False , **_snake_case , ):
"""simple docstring"""
super().__init__(
feature_size=_snake_case , sampling_rate=_snake_case , padding_value=_snake_case , return_attention_mask=_snake_case , **_snake_case , )
lowerCAmelCase = n_fft
lowerCAmelCase = hop_length
lowerCAmelCase = chunk_length
lowerCAmelCase = chunk_length * sampling_rate
lowerCAmelCase = self.n_samples // hop_length
lowerCAmelCase = sampling_rate
lowerCAmelCase = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_snake_case , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=_snake_case , norm='slaney' , mel_scale='slaney' , )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = spectrogram(
_snake_case , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='log10' , )
lowerCAmelCase = log_spec[:, :-1]
lowerCAmelCase = np.maximum(_snake_case , log_spec.max() - 8.0 )
lowerCAmelCase = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def UpperCamelCase__ ( _snake_case , _snake_case , _snake_case = 0.0 ):
"""simple docstring"""
if attention_mask is not None:
lowerCAmelCase = np.array(_snake_case , np.intaa )
lowerCAmelCase = []
for vector, length in zip(_snake_case , attention_mask.sum(-1 ) ):
lowerCAmelCase = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
lowerCAmelCase = padding_value
normed_input_values.append(_snake_case )
else:
lowerCAmelCase = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self , _snake_case , _snake_case = True , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = "max_length" , _snake_case = None , _snake_case = None , _snake_case = None , **_snake_case , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
F' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
lowerCAmelCase = isinstance(_snake_case , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
lowerCAmelCase = is_batched_numpy or (
isinstance(_snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_snake_case , np.ndarray ):
lowerCAmelCase = np.asarray(_snake_case , dtype=np.floataa )
elif isinstance(_snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase = [np.asarray([raw_speech] ).T]
lowerCAmelCase = BatchFeature({'input_features': raw_speech} )
# convert into correct format for padding
lowerCAmelCase = self.pad(
_snake_case , padding=_snake_case , max_length=max_length if max_length else self.n_samples , truncation=_snake_case , pad_to_multiple_of=_snake_case , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
lowerCAmelCase = self.zero_mean_unit_var_norm(
padded_inputs['input_features'] , attention_mask=padded_inputs['attention_mask'] , padding_value=self.padding_value , )
lowerCAmelCase = np.stack(padded_inputs['input_features'] , axis=0 )
# make sure list is in array format
lowerCAmelCase = padded_inputs.get('input_features' ).transpose(2 , 0 , 1 )
lowerCAmelCase = [self._np_extract_fbank_features(_snake_case ) for waveform in input_features[0]]
if isinstance(input_features[0] , _snake_case ):
lowerCAmelCase = [np.asarray(_snake_case , dtype=np.floataa ) for feature in input_features]
else:
lowerCAmelCase = input_features
if return_attention_mask:
            # rescale the attention mask from samples (480000) to features (3000)
lowerCAmelCase = padded_inputs['attention_mask'][:, :: self.hop_length]
if return_tensors is not None:
lowerCAmelCase = padded_inputs.convert_to_tensors(_snake_case )
return padded_inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = copy.deepcopy(self.__dict__ )
lowerCAmelCase = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
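        # Hedged usage sketch of the extractor above (assuming it is exposed as
        # `WhisperFeatureExtractor`): raw mono 16 kHz audio is padded/truncated to
        # 30 s (480_000 samples) and converted to an 80-bin log-mel spectrogram of
        # 480_000 // 160 = 3_000 frames:
        #
        #     fe = WhisperFeatureExtractor()
        #     audio = np.zeros(16_000, dtype=np.float32)   # 1 s of silence
        #     feats = fe(audio, sampling_rate=16_000, return_tensors="np").input_features
        #     assert feats.shape == (1, 80, 3_000)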
| 4 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class __UpperCAmelCase :
@staticmethod
def UpperCAmelCase_ ( *_lowerCamelCase , **_lowerCamelCase ):
pass
def snake_case_ ( __snake_case : int) -> Union[str, Any]:
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
A_ : Any =(
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
__A : Union[str, Any] = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCAmelCase_ = pipeline(
'''document-question-answering''' , model=_lowerCamelCase , tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
lowerCAmelCase_ = INVOICE_URL
lowerCAmelCase_ = list(zip(*apply_tesseract(load_image(_lowerCamelCase ) , _lowerCamelCase , '''''' ) ) )
lowerCAmelCase_ = '''What is the placebo?'''
lowerCAmelCase_ = [
{
'''image''': load_image(_lowerCamelCase ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ):
lowerCAmelCase_ = dqa_pipeline(_lowerCamelCase , top_k=2 )
self.assertEqual(
_lowerCamelCase , [
[
{'''score''': ANY(_lowerCamelCase ), '''answer''': ANY(_lowerCamelCase ), '''start''': ANY(_lowerCamelCase ), '''end''': ANY(_lowerCamelCase )},
{'''score''': ANY(_lowerCamelCase ), '''answer''': ANY(_lowerCamelCase ), '''start''': ANY(_lowerCamelCase ), '''end''': ANY(_lowerCamelCase )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
lowerCAmelCase_ = INVOICE_URL
lowerCAmelCase_ = '''How many cats are there?'''
lowerCAmelCase_ = [
{'''score''': 0.00_01, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.00_01, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
lowerCAmelCase_ = dqa_pipeline(image=_lowerCamelCase , question=_lowerCamelCase , top_k=2 )
self.assertEqual(nested_simplify(_lowerCamelCase , decimals=4 ) , _lowerCamelCase )
lowerCAmelCase_ = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(_lowerCamelCase , decimals=4 ) , _lowerCamelCase )
        # No text is detected in this image, so layoutlmv2 cannot ground an answer
        # and the pipeline is expected to return an empty result.
lowerCAmelCase_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
lowerCAmelCase_ = dqa_pipeline(image=_lowerCamelCase , question=_lowerCamelCase , top_k=2 )
self.assertEqual(_lowerCamelCase , [] )
        # We can optionally pass the words and bounding boxes directly
lowerCAmelCase_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
lowerCAmelCase_ = []
lowerCAmelCase_ = []
lowerCAmelCase_ = dqa_pipeline(image=_lowerCamelCase , question=_lowerCamelCase , words=_lowerCamelCase , boxes=_lowerCamelCase , top_k=2 )
self.assertEqual(_lowerCamelCase , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
lowerCAmelCase_ = INVOICE_URL
lowerCAmelCase_ = '''What is the invoice number?'''
lowerCAmelCase_ = dqa_pipeline(image=_lowerCamelCase , question=_lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [
{'''score''': 0.99_44, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.00_09, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowerCAmelCase_ = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [
{'''score''': 0.99_44, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.00_09, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowerCAmelCase_ = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [
[
{'''score''': 0.99_44, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.00_09, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
lowerCAmelCase_ = INVOICE_URL
lowerCAmelCase_ = '''What is the invoice number?'''
lowerCAmelCase_ = dqa_pipeline(image=_lowerCamelCase , question=_lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [
{'''score''': 0.99_74, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.99_48, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowerCAmelCase_ = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [
{'''score''': 0.99_74, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.99_48, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowerCAmelCase_ = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [
[
{'''score''': 0.99_74, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.99_48, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=_lowerCamelCase )
lowerCAmelCase_ = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=_lowerCamelCase , revision='''3dc6de3''' , )
lowerCAmelCase_ = INVOICE_URL
lowerCAmelCase_ = '''What is the invoice number?'''
lowerCAmelCase_ = dqa_pipeline(image=_lowerCamelCase , question=_lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
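
    # Standalone usage sketch of the pipeline exercised above (same checkpoint as in
    # the tests; `INVOICE_URL` is assumed to be defined earlier in this test module):
    #   dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
    #   dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=2)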
| 274 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
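
# Lazy-import sketch: because `sys.modules[__name__]` is swapped for a `_LazyModule`
# at import time, the heavy torch/TF submodules above are only imported when one of
# these names is first accessed, e.g. (assuming this file lives at
# transformers/models/xlnet/__init__.py):
#
#   from transformers.models.xlnet import XLNetConfig  # cheap: config module only
#   from transformers.models.xlnet import XLNetModel   # triggers the torch import lazily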
| 4 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # timm stores query/key/value as one fused matrix; split it in three
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
_A : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
_A : int =parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
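
# Example invocation (hypothetical script and output paths; the flags are defined above):
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64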
| 4 | 1 |
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """
    Returns the maximum sum of ``k`` consecutive elements of ``array`` using a
    sliding window.

    >>> max_sum_in_array([1, 4, 2, 10, 2, 3, 1, 0, 20], 4)
    24
    """
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        # slide the window one step: drop array[i], take in array[i + k]
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
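

# Sanity-check sketch: for 0 < k <= len(arr), the sliding-window result should match
# a brute-force O(n * k) scan; handy when modifying the update rule above.
#   assert max_sum_in_array(arr, k) == max(
#       sum(arr[i : i + k]) for i in range(len(arr) - k + 1)
#   )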
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
a__ = [randint(-1000, 1000) for i in range(100)]
a__ = randint(0, 110)
print(f'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
| 14 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
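

# Usage sketch for `inverse_of_matrix` defined below: with m = [[4.0, 7.0], [2.0, 6.0]]
# the determinant is 4*6 - 2*7 = 10, so the inverse is [[0.6, -0.7], [-0.2, 0.4]]
# (multiplying m by the result gives the identity matrix).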
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    """Return the inverse of a 2x2 or 3x3 matrix using the adjugate method."""
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
| 189 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
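

# Minimal usage sketch (mirrors the zero-shot NLI trick above: each candidate label
# is scored via the hypothesis "This example is <label>"):
#   classifier = TextClassificationTool()
#   classifier("This is a super nice API!", labels=["positive", "negative"])  # -> "positive"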
| 715 |
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    """
    Count the hollow square laminae that can be formed using up to ``limit``
    tiles (Project Euler problem 173); the problem statement notes that
    forty-one laminae can be formed with up to one hundred tiles:

    >>> solution(100)
    41
    """
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1

        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer
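

# Worked example for the counting above: a lamina with outer width ``o`` and hole
# width ``h`` (same parity, h >= 1) uses o**2 - h**2 tiles, so for o = 3 only
# h = 1 fits within 8 tiles, matching the "+ 1" term when the bounds coincide.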
if __name__ == "__main__":
print(f'''{solution() = }''')
| 552 | 0 |
def lucas_lehmer_test(p: int) -> bool:
    """
    Lucas-Lehmer primality test for Mersenne numbers 2**p - 1 (p itself must be
    prime for the test to be meaningful).

    >>> lucas_lehmer_test(7)
    True
    >>> lucas_lehmer_test(11)
    False
    """
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
| 514 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

                title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
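
# Example invocation (hypothetical CSV path; the file is expected to contain
# `model`, `batch_size`, `sequence_length` and `result` columns, as read above):
#   python plot_csv_file.py --csv_file plots/memory.csv --figure_png_file plots/memory.png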
| 503 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" Blenderbot tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = mask_token

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
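

# Minimal usage sketch (downloads the checkpoint on first use):
#   tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#   tokenizer("Sample text")["input_ids"]  # a single EOS is appended, no BOS
#   (see `build_inputs_with_special_tokens` above for why)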
| 706 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """
    Copy/paste/tweak the fairseq model's weights to the transformers design.
    """
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)

    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"

    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-xls-r-1b',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/mbart-large-50-one-to-many-mmt',
type=str,
help='Path to hf decoder checkpoint config',
)
    parser.add_argument('--add_adapter', default=True, type=bool, help='whether to add model adapter layers')
parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers')
parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers')
parser.add_argument('--encoder_output_dim', default=1024, type=int, help='encoder output dim')
parser.add_argument('--start_token_id', default=25_0004, type=int, help='`decoder_start_token_id` of model config')
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
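
# Example invocation (hypothetical local paths; all flags are declared above):
#   python convert_mbart_wav2vec2_seq2seq_original_to_pytorch.py \
#       --checkpoint_path ./checkpoint_best.pt \
#       --dict_path ./dict.mbart50.txt \
#       --config_yaml_path ./config.yaml \
#       --pytorch_dump_folder_path ./wav2vec2-xls-r-mbart50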
| 429 | 0 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module):
    """Freezes a module's parameters so they are excluded from backpropagation."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(image):
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
| 30 |
"""simple docstring"""
from torch import nn
class ClassificationHead(nn.Module):
    """Classification head for the transformer encoders."""

    def __init__(self, class_size: int, embed_size: int):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
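

# Minimal usage sketch: score a batch of pooled hidden states against 5 classes
# (requires `import torch`; the sizes here are illustrative assumptions):
#   head = ClassificationHead(class_size=5, embed_size=768)
#   logits = head(torch.randn(8, 768))  # -> shape (8, 5)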
| 512 | 0 |
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        """Find instances where a non-binary file is opened without UTF-8 encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        """Find instances of `print` used in a dataset script."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            valid_matches = [match for match in matches if match is not None and match.group(1) is not None]
        return valid_matches[0] if valid_matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
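
# To run only these guards locally (hypothetical path to this test module):
#   pytest -m integration tests/test_dataset_scripts.py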
| 709 | '''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 30 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token'''))
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings'''))
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''))
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''))
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias'''))
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight'))
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight'))
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias'))
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight'))
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight'))
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias'))
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight'))
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight'))
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias'))
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight'))
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight'))
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias'))
# transformer encoder
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight'))
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias'))
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight'))
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias'))
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight'))
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias'))
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight'))
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias'))
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight'))
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias'))
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
])
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    # verify the conversion on a COCO sample image
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
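# Example invocation (a sketch; the script filename and dump folder are hypothetical):
#
#     python convert_vit_hybrid_timm_to_pytorch.py \
#         --vit_name vit_base_r50_s16_384 \
#         --pytorch_dump_folder_path ./vit-hybrid-base \
#         --push_to_hub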
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_blenderbot_fast'] = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blenderbot'] = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blenderbot'] = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_blenderbot'] = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
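# Usage sketch: consumers import through the lazy module, which only loads the
# heavy submodules on first attribute access (assumes the `transformers` package
# is installed; the checkpoint below is illustrative):
#
#     from transformers import BlenderbotConfig, BlenderbotTokenizer
#     tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")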
"""simple docstring"""
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
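# A minimal usage sketch (values are illustrative; exactly one argument is 0):
#
#     >>> ohms_law(voltage=10, current=0, resistance=5)
#     {'current': 2.0}
#     >>> ohms_law(voltage=0, current=-1.5, resistance=2)
#     {'voltage': -3.0}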
if __name__ == "__main__":
import doctest
    doctest.testmod()
"""simple docstring"""
import argparse
import datetime
def zeller(date_input: str) -> str:
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if len(date_input) != 10:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"""Your date {date_input}, is a {days[str(f)]}!"""
    return response
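# A quick usage sketch (the date below is illustrative):
#
#     >>> zeller('01-31-2010')
#     'Your date 01-31-2010, is a Sunday!'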
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
"Find out what day of the week nearly any date is or was. Enter "
"date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
)
)
parser.add_argument(
"date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
)
    args = parser.parse_args()
    zeller(args.date_input)
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class Node:
    """simple docstring"""

    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    """simple docstring"""

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
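# A small usage sketch (tree shape is illustrative):
#
#     >>> tree = Node(10)
#     >>> tree.left, tree.right = Node(5), Node(-3)
#     >>> sum(BinaryTreeNodeSum(tree))
#     12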
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_focalnet'] = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    """simple docstring"""

    _backends = ['''note_seq''']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["""note_seq"""])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["""note_seq"""])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["""note_seq"""])
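# Behavior sketch: because of the DummyObject metaclass, touching this placeholder
# without `note_seq` installed raises an ImportError from requires_backends, e.g.
# (illustrative):
#
#     MidiProcessor()  # -> ImportError: ... requires the note_seq library ...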
'''simple docstring'''
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed=None) -> str:
    """simple docstring"""
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)
    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="""BB84""")
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    bb84_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("""aer_simulator""")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = """""".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ])
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, """0""")
    return key
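# A usage sketch (requires qiskit with a local Aer simulator; the printed length
# is the only guaranteed property of the output):
#
#     key = bb84(key_len=16, seed=0)
#     print(len(key))  # -> 16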
if __name__ == "__main__":
    print(f'The generated key is : {bb84(8, seed=0)}')
from doctest import testmod
testmod()
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = 'naver-clova-ix/donut-base'
class DonutProcessorTest(unittest.TestCase):
"""simple docstring"""
    def setUp(self):
        '''simple docstring'''
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        '''simple docstring'''
        expected_json = {
"name": "John Doe",
"age": "99",
"city": "Atlanta",
"state": "GA",
"zip": "30301",
"phone": "123-4567",
"nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
}
        sequence = (
"<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
"<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
"<s_nicknames><s_nickname>Johnny</s_nickname>"
"<sep/><s_nickname>JD</s_nickname></s_nicknames>"
)
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    """simple docstring"""
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(F"""Building PyTorch model from configuration: {config}""")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
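# Example invocation (a sketch; the script filename and paths are hypothetical):
#
#     python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./mobilebert/mobilebert.ckpt \
#         --mobilebert_config_file ./mobilebert/config.json \
#         --pytorch_dump_path ./mobilebert/pytorch_model.bin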
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--mobilebert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained MobileBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    '''simple docstring'''
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(F"""can't find {path}""")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    '''simple docstring'''

    def test_run_glue(self) -> None:
        """simple docstring"""
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 5_0_0)
    def test_trainer_tpu(self) -> None:
        """simple docstring"""
        import xla_spawn

        testargs = "\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
        with patch.object(sys, "argv", testargs):
xla_spawn.main()
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class SwitchTransformersConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ) -> None:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'")

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs)
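# A quick usage sketch (arguments shown are just the defaults defined above):
#
#     config = SwitchTransformersConfig(num_experts=8, num_sparse_encoder_layers=3)
#     print(config.encoder_sparse_step)  # -> 4 with the default num_layers=12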
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'poolformer'

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1E-5,
        initializer_range=0.02,
        **kwargs,
    ) -> None:
        '''simple docstring'''
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 2E-3
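# Usage sketch (illustrative; mirrors the defaults defined above):
#
#     config = PoolFormerConfig(hidden_sizes=[64, 128, 320, 512])
#     onnx_config = PoolFormerOnnxConfig(config)
#     print(dict(onnx_config.inputs))  # -> {'pixel_values': {0: 'batch', ...}}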
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"tokenizer_file": {
"bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ) -> None:
        '''simple docstring'''
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
                " pretokenized inputs.")
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
                " pretokenized inputs.")
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation) -> List[int]:
        '''simple docstring'''
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
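# Usage sketch (downloads the tokenizer from the Hub; output is illustrative):
#
#     tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#     print(tok("Hello world")["input_ids"])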
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""],
"""tokenization_ctrl""": ["""CTRLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_ctrl"""] = [
"""CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CTRLForSequenceClassification""",
"""CTRLLMHeadModel""",
"""CTRLModel""",
"""CTRLPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_ctrl"""] = [
"""TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCTRLForSequenceClassification""",
"""TFCTRLLMHeadModel""",
"""TFCTRLModel""",
"""TFCTRLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"""UserAgent""": UserAgent().random}
def extract_user_profile(script) -> dict:
    """simple docstring"""
    data = script.contents[0]
    info = json.loads(data[data.find('''{"config"''') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    '''simple docstring'''

    def __init__(self, username):
        self.url = F'''https://www.instagram.com/{username}/'''
        self.user_data = self.get_json()

    def get_json(self):
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, '''html.parser''').find_all('''script''')
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self ):
return F'''{self.fullname} ({self.username}) is {self.biography}'''
    @property
    def username(self):
        return self.user_data["username"]

    @property
    def fullname(self):
        return self.user_data["full_name"]

    @property
    def biography(self):
        return self.user_data["biography"]

    @property
    def email(self):
        return self.user_data["business_email"]

    @property
    def website(self):
        return self.user_data["external_url"]

    @property
    def number_of_followers(self):
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self):
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self):
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self):
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self):
        return self.user_data["is_verified"]

    @property
    def is_private(self):
        return self.user_data["is_private"]
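# Usage sketch (performs a live request to Instagram; values are illustrative):
#
#     user = InstagramUser("github")
#     print(user.fullname, user.number_of_followers)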
def test_instagram_user(username: str = "github") -> None:
    """simple docstring"""
    import os

    if os.environ.get('''CI'''):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 12_0000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("""github""")
print(instagram_user)
print(f'''{instagram_user.number_of_posts = }''')
print(f'''{instagram_user.number_of_followers = }''')
print(f'''{instagram_user.number_of_followings = }''')
print(f'''{instagram_user.email = }''')
print(f'''{instagram_user.website = }''')
print(f'''{instagram_user.profile_picture_url = }''')
print(f'''{instagram_user.is_verified = }''')
    print(f'''{instagram_user.is_private = }''')
'''simple docstring'''
import numpy as np
SQUARE = [
["""a""", """b""", """c""", """d""", """e"""],
["""f""", """g""", """h""", """i""", """k"""],
["""l""", """m""", """n""", """o""", """p"""],
["""q""", """r""", """s""", """t""", """u"""],
["""v""", """w""", """x""", """y""", """z"""],
]
class BifidCipher:
    """simple docstring"""

    def __init__(self) -> None:
        '''simple docstring'''
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str):
        '''simple docstring'''
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        '''simple docstring'''
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        '''simple docstring'''
        message = message.lower()
        message = message.replace(''' ''', '''''')
        message = message.replace('''j''', '''i''')

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ''''''
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        '''simple docstring'''
        message = message.lower()
        message = message.replace(''' ''', '''''')

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ''''''
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
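# Usage sketch (the round trip below is illustrative; input must avoid 'j',
# which the square folds into 'i'):
#
#     cipher = BifidCipher()
#     secret = cipher.encode("testmessage")
#     print(cipher.decode(secret))  # -> "testmessage"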
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
"""simple docstring"""
def __init__( self : Tuple , snake_case_ : int , snake_case_ : Union[str, Any]=1_3 , snake_case_ : Optional[Any]=7 , snake_case_ : List[str]=True , snake_case_ : Optional[int]=True , snake_case_ : Optional[int]=True , snake_case_ : List[str]=True , snake_case_ : Optional[Any]=9_9 , snake_case_ : int=3_2 , snake_case_ : str=5 , snake_case_ : int=4 , snake_case_ : List[str]=3_7 , snake_case_ : Tuple="gelu" , snake_case_ : List[str]=0.1 , snake_case_ : List[Any]=0.1 , snake_case_ : Union[str, Any]=5_1_2 , snake_case_ : Union[str, Any]=1_6 , snake_case_ : List[Any]=2 , snake_case_ : List[Any]=0.0_2 , snake_case_ : Any=False , snake_case_ : List[Any]=True , snake_case_ : Optional[Any]="None" , snake_case_ : Dict=3 , snake_case_ : Optional[int]=4 , snake_case_ : Any=None , ):
'''simple docstring'''
snake_case__ : Union[str, Any] = parent
snake_case__ : Any = batch_size
snake_case__ : List[Any] = seq_length
snake_case__ : Optional[int] = is_training
snake_case__ : Dict = use_input_mask
snake_case__ : List[str] = use_token_type_ids
snake_case__ : int = use_labels
snake_case__ : int = vocab_size
snake_case__ : Optional[Any] = hidden_size
snake_case__ : int = num_hidden_layers
snake_case__ : Optional[Any] = num_attention_heads
snake_case__ : Optional[Any] = intermediate_size
snake_case__ : Optional[Any] = hidden_act
snake_case__ : List[str] = hidden_dropout_prob
snake_case__ : Optional[Any] = attention_probs_dropout_prob
snake_case__ : int = max_position_embeddings
snake_case__ : List[str] = type_vocab_size
snake_case__ : Union[str, Any] = type_sequence_label_size
snake_case__ : List[Any] = initializer_range
snake_case__ : Any = num_labels
snake_case__ : Tuple = num_choices
snake_case__ : List[str] = relative_attention
snake_case__ : Optional[int] = position_biased_input
snake_case__ : Union[str, Any] = pos_att_type
snake_case__ : Optional[Any] = scope
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
snake_case__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Union[str, Any] = None
if self.use_input_mask:
snake_case__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
snake_case__ : int = None
if self.use_token_type_ids:
snake_case__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case__ : List[str] = None
snake_case__ : Tuple = None
snake_case__ : Any = None
if self.use_labels:
snake_case__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
snake_case__ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__ ( self : Tuple ):
'''simple docstring'''
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __magic_name__ ( self : Union[str, Any] , snake_case_ : int ):
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __magic_name__ ( self : List[str] , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : int , snake_case_ : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] ):
'''simple docstring'''
snake_case__ : List[Any] = DebertaVaModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case__ : Tuple = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )[0]
snake_case__ : Optional[int] = model(snake_case_ , token_type_ids=snake_case_ )[0]
snake_case__ : Dict = model(snake_case_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __magic_name__ ( self : Dict , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Tuple , snake_case_ : Optional[int] , snake_case_ : List[str] , snake_case_ : int , snake_case_ : Union[str, Any] ):
'''simple docstring'''
snake_case__ : Tuple = DebertaVaForMaskedLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case__ : Dict = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : str , snake_case_ : Optional[Any] , snake_case_ : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : str , snake_case_ : Tuple ):
'''simple docstring'''
snake_case__ : Union[str, Any] = self.num_labels
snake_case__ : Optional[int] = DebertaVaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case__ : Tuple = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(snake_case_ )
def __magic_name__ ( self : Optional[Any] , snake_case_ : Optional[int] , snake_case_ : Optional[int] , snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : Dict , snake_case_ : Any ):
'''simple docstring'''
snake_case__ : List[Any] = self.num_labels
snake_case__ : Any = DebertaVaForTokenClassification(config=snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case__ : Union[str, Any] = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self : Any , snake_case_ : Union[str, Any] , snake_case_ : Dict , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] , snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : List[str] ):
'''simple docstring'''
snake_case__ : Union[str, Any] = DebertaVaForQuestionAnswering(config=snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case__ : List[str] = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self : int , snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : str , snake_case_ : Optional[Any] ):
'''simple docstring'''
snake_case__ : Union[str, Any] = DebertaVaForMultipleChoice(config=snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case__ : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ : int = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__ ( self : int ):
'''simple docstring'''
snake_case__ : Dict = self.prepare_config_and_inputs()
(
(
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) ,
) : Dict = config_and_inputs
snake_case__ : Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{
"""feature-extraction""": DebertaVaModel,
"""fill-mask""": DebertaVaForMaskedLM,
"""question-answering""": DebertaVaForQuestionAnswering,
"""text-classification""": DebertaVaForSequenceClassification,
"""token-classification""": DebertaVaForTokenClassification,
"""zero-shot""": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
snake_case__ : int = DebertaVaModelTester(self )
snake_case__ : Optional[Any] = ConfigTester(self , config_class=snake_case_ , hidden_size=3_7 )
def __magic_name__ ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __magic_name__ ( self : Tuple ):
'''simple docstring'''
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*snake_case_ )
def __magic_name__ ( self : Any ):
'''simple docstring'''
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*snake_case_ )
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*snake_case_ )
def __magic_name__ ( self : Dict ):
'''simple docstring'''
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*snake_case_ )
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*snake_case_ )
def __magic_name__ ( self : Dict ):
'''simple docstring'''
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*snake_case_ )
@slow
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : int = DebertaVaModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@unittest.skip(reason='''Model not available yet''' )
def __magic_name__ ( self : str ):
'''simple docstring'''
pass
@slow
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
snake_case__ : Optional[int] = DebertaVaModel.from_pretrained('''microsoft/deberta-v2-xlarge''' )
snake_case__ : Tuple = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
snake_case__ : List[str] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
snake_case__ : Optional[Any] = model(snake_case_ , attention_mask=snake_case_ )[0]
# compare the actual values for a slice.
snake_case__ : Dict = torch.tensor(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case_ , atol=1e-4 ) , F"""{output[:, 1:4, 1:4]}""" )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_pegasus_x"""] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
def power(base: int, exponent: int) -> float:
    """simple docstring"""
    return base * power(base, exponent - 1) if exponent else 1
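# A quick trace of the recursion above (illustrative):
#
#     power(2, 3) -> 2 * power(2, 2) -> 2 * 2 * power(2, 1) -> 2 * 2 * 2 * power(2, 0) -> 8
#
#     >>> power(3, 4)
#     81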
if __name__ == "__main__":
print("""Raise base to the power of exponent using recursion...""")
    base = int(input("""Enter the base: """).strip())
    exponent = int(input("""Enter the exponent: """).strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
print(F'''{base} to the power of {exponent} is {result}''')
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'''configuration_encoder_decoder''': ['''EncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_encoder_decoder'''] = ['''EncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_encoder_decoder'''] = ['''TFEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_encoder_decoder'''] = ['''FlaxEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'''},
'''merges_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'''},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''ctrl''': 256,
}
CONTROL_CODES = {
'''Pregnancy''': 168_629,
'''Christianity''': 7_675,
'''Explain''': 106_423,
'''Fitness''': 63_440,
'''Saving''': 63_163,
'''Ask''': 27_171,
'''Ass''': 95_985,
'''Joke''': 163_509,
'''Questions''': 45_622,
'''Thoughts''': 49_605,
'''Retail''': 52_342,
'''Feminism''': 164_338,
'''Writing''': 11_992,
'''Atheism''': 192_263,
'''Netflix''': 48_616,
'''Computing''': 39_639,
'''Opinion''': 43_213,
'''Alone''': 44_967,
'''Funny''': 58_917,
'''Gaming''': 40_358,
'''Human''': 4_088,
'''India''': 1_331,
'''Joker''': 77_138,
'''Diet''': 36_206,
'''Legal''': 11_859,
'''Norman''': 4_939,
'''Tip''': 72_689,
'''Weight''': 52_343,
'''Movies''': 46_273,
'''Running''': 23_425,
'''Science''': 2_090,
'''Horror''': 37_793,
'''Confession''': 60_572,
'''Finance''': 12_250,
'''Politics''': 16_360,
'''Scary''': 191_985,
'''Support''': 12_654,
'''Technologies''': 32_516,
'''Teenage''': 66_160,
'''Event''': 32_769,
'''Learned''': 67_460,
'''Notion''': 182_770,
'''Wikipedia''': 37_583,
'''Books''': 6_665,
'''Extract''': 76_050,
'''Confessions''': 102_701,
'''Conspiracy''': 75_932,
'''Links''': 63_674,
'''Narcissus''': 150_425,
'''Relationship''': 54_766,
'''Relationships''': 134_796,
'''Reviews''': 41_671,
'''News''': 4_256,
'''Translation''': 26_820,
'''multilingual''': 128_406,
}
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
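# A quick sketch of what get_pairs produces (illustrative):
#
#     >>> sorted(get_pairs(("l", "o", "w", "</w>")))
#     [('l', 'o'), ('o', 'w'), ('w', '</w>')]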
class CTRLTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    '''simple docstring'''
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model')
        qformer_tokenizer = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert')

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)
def _snake_case ( self : Union[str, Any] , **snake_case__ : Union[str, Any] ) -> Optional[Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).tokenizer
def _snake_case ( self : Union[str, Any] , **snake_case__ : Any ) -> Optional[Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).image_processor
def _snake_case ( self : Tuple , **snake_case__ : Any ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).qformer_tokenizer
def _snake_case ( self : int ) -> str:
shutil.rmtree(self.tmpdirname )
def _snake_case ( self : Optional[Any] ) -> str:
_lowerCamelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_lowerCamelCase = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _snake_case ( self : Any ) -> Optional[Any]:
_lowerCamelCase = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
_lowerCamelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_lowerCamelCase = self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 )
_lowerCamelCase = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=snake_case__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case__ )
self.assertIsInstance(processor.qformer_tokenizer , snake_case__ )
def _snake_case ( self : Union[str, Any] ) -> Dict:
_lowerCamelCase = self.get_image_processor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_qformer_tokenizer()
_lowerCamelCase = InstructBlipProcessor(
tokenizer=snake_case__ , image_processor=snake_case__ , qformer_tokenizer=snake_case__ )
_lowerCamelCase = self.prepare_image_inputs()
_lowerCamelCase = image_processor(snake_case__ , return_tensors='np' )
_lowerCamelCase = processor(images=snake_case__ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _snake_case ( self : Optional[Any] ) -> Optional[int]:
_lowerCamelCase = self.get_image_processor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_qformer_tokenizer()
_lowerCamelCase = InstructBlipProcessor(
tokenizer=snake_case__ , image_processor=snake_case__ , qformer_tokenizer=snake_case__ )
_lowerCamelCase = 'lower newer'
_lowerCamelCase = processor(text=snake_case__ )
_lowerCamelCase = tokenizer(snake_case__ , return_token_type_ids=snake_case__ )
_lowerCamelCase = qformer_tokenizer(snake_case__ , return_token_type_ids=snake_case__ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['qformer_' + key] )
def _snake_case ( self : Union[str, Any] ) -> Dict:
_lowerCamelCase = self.get_image_processor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_qformer_tokenizer()
_lowerCamelCase = InstructBlipProcessor(
tokenizer=snake_case__ , image_processor=snake_case__ , qformer_tokenizer=snake_case__ )
_lowerCamelCase = 'lower newer'
_lowerCamelCase = self.prepare_image_inputs()
_lowerCamelCase = processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
# test if it raises when no input is passed
with pytest.raises(snake_case__ ):
processor()
def _snake_case ( self : str ) -> List[str]:
_lowerCamelCase = self.get_image_processor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_qformer_tokenizer()
_lowerCamelCase = InstructBlipProcessor(
tokenizer=snake_case__ , image_processor=snake_case__ , qformer_tokenizer=snake_case__ )
_lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCamelCase = processor.batch_decode(snake_case__ )
_lowerCamelCase = tokenizer.batch_decode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
def _snake_case ( self : List[str] ) -> Union[str, Any]:
_lowerCamelCase = self.get_image_processor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_qformer_tokenizer()
_lowerCamelCase = InstructBlipProcessor(
tokenizer=snake_case__ , image_processor=snake_case__ , qformer_tokenizer=snake_case__ )
_lowerCamelCase = 'lower newer'
_lowerCamelCase = self.prepare_image_inputs()
_lowerCamelCase = processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
| 718 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
A = logging.get_logger(__name__)
def lowerCamelCase ( UpperCamelCase : str ) -> List[str]:
_lowerCamelCase = SwinConfig.from_pretrained(
'microsoft/swin-tiny-patch4-window7-224' , out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
_lowerCamelCase = MaskFormerConfig(backbone_config=UpperCamelCase )
_lowerCamelCase = 'huggingface/label-files'
if "ade20k-full" in model_name:
# this should be ok
_lowerCamelCase = 8_47
_lowerCamelCase = 'maskformer-ade20k-full-id2label.json'
elif "ade" in model_name:
# this should be ok
_lowerCamelCase = 1_50
_lowerCamelCase = 'ade20k-id2label.json'
elif "coco-stuff" in model_name:
# this should be ok
_lowerCamelCase = 1_71
_lowerCamelCase = 'maskformer-coco-stuff-id2label.json'
elif "coco" in model_name:
# TODO
_lowerCamelCase = 1_33
_lowerCamelCase = 'coco-panoptic-id2label.json'
elif "cityscapes" in model_name:
# this should be ok
_lowerCamelCase = 19
_lowerCamelCase = 'cityscapes-id2label.json'
elif "vistas" in model_name:
# this should be ok
_lowerCamelCase = 65
_lowerCamelCase = 'mapillary-vistas-id2label.json'
_lowerCamelCase = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type='dataset' ) , 'r' ) )
_lowerCamelCase = {int(UpperCamelCase ): v for k, v in idalabel.items()}
return config
def lowerCamelCase ( UpperCamelCase : Any ) -> Any:
_lowerCamelCase = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') )
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') )
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') )
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') )
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def lowerCamelCase ( UpperCamelCase : str , UpperCamelCase : Tuple , UpperCamelCase : Union[str, Any] ) -> Optional[Any]:
_lowerCamelCase = dct.pop(UpperCamelCase )
_lowerCamelCase = val
def lowerCamelCase ( UpperCamelCase : Dict , UpperCamelCase : List[Any] ) -> Union[str, Any]:
_lowerCamelCase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_lowerCamelCase = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_lowerCamelCase = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
_lowerCamelCase = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase = in_proj_weight[:dim, :]
_lowerCamelCase = in_proj_bias[: dim]
_lowerCamelCase = in_proj_weight[
dim : dim * 2, :
]
_lowerCamelCase = in_proj_bias[
dim : dim * 2
]
_lowerCamelCase = in_proj_weight[
-dim :, :
]
_lowerCamelCase = in_proj_bias[-dim :]
# fmt: on
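# Hedged sketch of the fused q/k/v split performed above. `_dim` is a made-up
# size; a (3 * dim, dim) projection is cut into three (dim, dim) blocks, in
# query/key/value order, exactly as the slicing above does.
import torch as _torch_demo # torch is already imported above; repeated so the sketch stands alone
_dim = 4
_fused = _torch_demo.arange(3 * _dim * _dim, dtype=_torch_demo.float32).reshape(3 * _dim, _dim)
_q, _k, _v = _fused[:_dim, :], _fused[_dim : 2 * _dim, :], _fused[-_dim:, :]
assert _q.shape == _k.shape == _v.shape == (_dim, _dim)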
def lowerCamelCase ( UpperCamelCase : int , UpperCamelCase : Union[str, Any] ) -> str:
# fmt: off
_lowerCamelCase = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
_lowerCamelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
_lowerCamelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase = in_proj_weight[: hidden_size, :]
_lowerCamelCase = in_proj_bias[:config.hidden_size]
_lowerCamelCase = in_proj_weight[hidden_size : hidden_size * 2, :]
_lowerCamelCase = in_proj_bias[hidden_size : hidden_size * 2]
_lowerCamelCase = in_proj_weight[-hidden_size :, :]
_lowerCamelCase = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
_lowerCamelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
_lowerCamelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase = in_proj_weight[: hidden_size, :]
_lowerCamelCase = in_proj_bias[:config.hidden_size]
_lowerCamelCase = in_proj_weight[hidden_size : hidden_size * 2, :]
_lowerCamelCase = in_proj_bias[hidden_size : hidden_size * 2]
_lowerCamelCase = in_proj_weight[-hidden_size :, :]
_lowerCamelCase = in_proj_bias[-hidden_size :]
# fmt: on
def lowerCamelCase ( ) -> torch.Tensor:
_lowerCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowerCamelCase = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw )
return im
@torch.no_grad()
def lowerCamelCase ( UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : bool = False ) -> Dict:
_lowerCamelCase = get_maskformer_config(UpperCamelCase )
# load original state_dict
with open(UpperCamelCase , 'rb' ) as f:
_lowerCamelCase = pickle.load(UpperCamelCase )
_lowerCamelCase = data['model']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
_lowerCamelCase = create_rename_keys(UpperCamelCase )
for src, dest in rename_keys:
rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
read_in_swin_q_k_v(UpperCamelCase , config.backbone_config )
read_in_decoder_q_k_v(UpperCamelCase , UpperCamelCase )
# update to torch tensors
for key, value in state_dict.items():
_lowerCamelCase = torch.from_numpy(UpperCamelCase )
# load 🤗 model
_lowerCamelCase = MaskFormerForInstanceSegmentation(UpperCamelCase )
model.eval()
for name, param in model.named_parameters():
print(UpperCamelCase , param.shape )
_lowerCamelCase , _lowerCamelCase = model.load_state_dict(UpperCamelCase , strict=UpperCamelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(UpperCamelCase ) == 0, F"""Unexpected keys: {unexpected_keys}"""
# verify results
_lowerCamelCase = prepare_img()
if "vistas" in model_name:
_lowerCamelCase = 65
elif "cityscapes" in model_name:
_lowerCamelCase = 6_55_35
else:
_lowerCamelCase = 2_55
_lowerCamelCase = True if 'ade' in model_name else False
_lowerCamelCase = MaskFormerImageProcessor(ignore_index=UpperCamelCase , reduce_labels=UpperCamelCase )
_lowerCamelCase = image_processor(UpperCamelCase , return_tensors='pt' )
_lowerCamelCase = model(**UpperCamelCase )
print('Logits:' , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
_lowerCamelCase = torch.tensor(
[[3.6_353, -4.4_770, -2.6_065], [0.5_081, -4.2_394, -3.5_343], [2.1_909, -5.0_353, -1.9_323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
model.save_pretrained(UpperCamelCase )
image_processor.save_pretrained(UpperCamelCase )
if push_to_hub:
print('Pushing model and image processor to the hub...' )
model.push_to_hub(F"""nielsr/{model_name}""" )
image_processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
help='Name of the MaskFormer model you\'d like to convert',  # was a one-element tuple; argparse expects a plain string
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
A = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 234 | 0 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :str=10 ) -> Optional[Any]:
_A = []
for _ in range(_snake_case ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def SCREAMING_SNAKE_CASE_ ( _snake_case :List[str] , _snake_case :Dict=10 ) -> Optional[int]:
_A = []
for step in range(_snake_case ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
_A = os.path.join(_snake_case , '''schedule.bin''' )
torch.save(scheduler.state_dict() , _snake_case )
_A = torch.load(_snake_case )
scheduler.load_state_dict(_snake_case )
return lrs
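# Hedged round-trip note for the helper above (standard torch API): saving
# scheduler.state_dict() and loading it back restores the internal step
# counter, so the learning rates recorded after the reload continue the
# original schedule rather than restarting it.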
@require_torch
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ) -> Any:
self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
for a, b in zip(__lowerCAmelCase , __lowerCAmelCase ):
self.assertAlmostEqual(__lowerCAmelCase , __lowerCAmelCase , delta=__lowerCAmelCase )
def snake_case_ ( self : int ) -> Union[str, Any]:
_A = torch.tensor([0.1, -0.2, -0.1] , requires_grad=__lowerCAmelCase )
_A = torch.tensor([0.4, 0.2, -0.5] )
_A = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_A = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
for _ in range(1_00 ):
_A = criterion(__lowerCAmelCase , __lowerCAmelCase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def snake_case_ ( self : int ) -> Union[str, Any]:
_A = torch.tensor([0.1, -0.2, -0.1] , requires_grad=__lowerCAmelCase )
_A = torch.tensor([0.4, 0.2, -0.5] )
_A = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_A = Adafactor(
params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=__lowerCAmelCase , weight_decay=0.0 , relative_step=__lowerCAmelCase , scale_parameter=__lowerCAmelCase , warmup_init=__lowerCAmelCase , )
for _ in range(10_00 ):
_A = criterion(__lowerCAmelCase , __lowerCAmelCase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
a__ : Optional[Any] = nn.Linear(50 , 50) if is_torch_available() else None
a__ : Dict = AdamW(m.parameters() , lr=10.0) if is_torch_available() else None
a__ : List[Any] = 10
def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any]=None ) -> List[Any]:
self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
for a, b in zip(__lowerCAmelCase , __lowerCAmelCase ):
self.assertAlmostEqual(__lowerCAmelCase , __lowerCAmelCase , delta=__lowerCAmelCase , msg=__lowerCAmelCase )
def snake_case_ ( self : Any ) -> str:
_A = {'''num_warmup_steps''': 2, '''num_training_steps''': 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
_A = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'''num_warmup_steps''': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, '''num_cycles''': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'''num_warmup_steps''': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
_A , _A = data
_A = scheduler_func(self.optimizer , **__lowerCAmelCase )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
_A = unwrap_schedule(__lowerCAmelCase , self.num_steps )
self.assertListAlmostEqual(
__lowerCAmelCase , __lowerCAmelCase , tol=1E-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , )
_A = scheduler_func(self.optimizer , **__lowerCAmelCase )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(__lowerCAmelCase ) # wrap to test picklability of the schedule
_A = unwrap_and_save_reload_schedule(__lowerCAmelCase , self.num_steps )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase , msg=f'''failed for {scheduler_func} in save and reload''' )
class lowerCamelCase__ :
"""simple docstring"""
def __init__( self : List[Any] , __lowerCAmelCase : Any ) -> List[Any]:
_A = fn
def __call__( self : Union[str, Any] , *__lowerCAmelCase : Any , **__lowerCAmelCase : List[str] ) -> Dict:
return self.fn(*__lowerCAmelCase , **__lowerCAmelCase )
@classmethod
def snake_case_ ( self : Any , __lowerCAmelCase : Optional[Any] ) -> List[str]:
_A = list(map(self , scheduler.lr_lambdas ) )
| 2 |
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def _A ( __snake_case :int , __snake_case :int , __snake_case :int , __snake_case :int , __snake_case :int , __snake_case :int ) -> np.ndarray:
"""simple docstring"""
if (ksize % 2) == 0:
__SCREAMING_SNAKE_CASE = ksize + 1
__SCREAMING_SNAKE_CASE = np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(__snake_case ):
for x in range(__snake_case ):
# distance from center
__SCREAMING_SNAKE_CASE = x - ksize // 2
__SCREAMING_SNAKE_CASE = y - ksize // 2
# degree to radiant
__SCREAMING_SNAKE_CASE = theta / 180 * np.pi
__SCREAMING_SNAKE_CASE = np.cos(_theta )
__SCREAMING_SNAKE_CASE = np.sin(_theta )
# get kernel x
__SCREAMING_SNAKE_CASE = cos_theta * px + sin_theta * py
# get kernel y
__SCREAMING_SNAKE_CASE = -sin_theta * px + cos_theta * py
# fill kernel
__SCREAMING_SNAKE_CASE = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
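# Hedged worked example of the Gabor expression above for a single kernel cell,
# using (px, py) = (1, 0), theta = 0, sigma = 10, lambd = 10, gamma = 0, psi = 0
# (relies on the module-level `import numpy as np`):
# exp(-(1 + 0) / 200) * cos(2 * pi / 10) ~= 0.995 * 0.809 ~= 0.805
_demo_val = np.exp(-1 / 200) * np.cos(2 * np.pi / 10)
assert 0.80 < _demo_val < 0.81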
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
_snake_case : Union[str, Any] = imread('../image_data/lena.jpg')
# turn image in gray scale value
_snake_case : List[str] = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
_snake_case : int = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 1_20, 1_50]:
_snake_case : List[str] = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
_snake_case : Optional[Any] = out / out.max() * 2_55
_snake_case : Union[str, Any] = out.astype(np.uinta)
imshow('Original', gray)
imshow('Gabor filter with 20x20 mask and 6 directions', out)
waitKey(0)
| 693 | 0 |
"""simple docstring"""
def _A ( __lowercase , __lowercase ):
"""simple docstring"""
return "\n".join(
f"""{number} * {i} = {number * i}""" for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
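# Hedged usage note: multiplication_table(number=2, number_of_terms=3) would
# yield "2 * 1 = 2\n2 * 2 = 4\n2 * 3 = 6".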
| 714 |
"""simple docstring"""
class SCREAMING_SNAKE_CASE__ : # Public class to implement a graph
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : list[list[bool]] ):
lowerCamelCase__ = row
lowerCamelCase__ = col
lowerCamelCase__ = graph
def __UpperCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : list[list[bool]] ):
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def __UpperCAmelCase ( self : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : list[list[bool]] ):
# Checking all 8 elements surrounding nth element
lowerCamelCase__ = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
lowerCamelCase__ = [-1, 0, 1, -1, 1, -1, 0, 1]
lowerCamelCase__ = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , SCREAMING_SNAKE_CASE_ ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , SCREAMING_SNAKE_CASE_ )
def __UpperCAmelCase ( self : int ): # And finally, count all islands.
lowerCamelCase__ = [[False for j in range(self.COL )] for i in range(self.ROW )]
lowerCamelCase__ = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
count += 1
return count
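# Hedged usage sketch for the class above (the dump's renaming makes all three
# methods share one name; the island counter is the one defined last):
# graph = [[1, 1, 0, 0],
#          [0, 1, 0, 0],
#          [0, 0, 0, 1]]
# Under the 8-directional connectivity used here, this grid holds 2 islands.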
| 258 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def lowercase (SCREAMING_SNAKE_CASE_ : Any ) -> Optional[int]:
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
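# Hedged mini-example of the counting rule above, with hypothetical keys: given
# {"encoder.embeddings.w": t1, "layer.w": t2}, only t2 contributes to the sum,
# because the original FLAVA checkpoint stores the embedding weights twice.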
def lowercase (SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
SCREAMING_SNAKE_CASE = key.replace('heads.cmd.mim_head.cls.predictions' , 'mmm_image_head' )
SCREAMING_SNAKE_CASE = key.replace('heads.cmd.mlm_head.cls.predictions' , 'mmm_text_head' )
SCREAMING_SNAKE_CASE = key.replace('heads.cmd.itm_head.cls' , 'itm_head' )
SCREAMING_SNAKE_CASE = key.replace('heads.cmd.itm_head.pooler' , 'itm_head.pooler' )
SCREAMING_SNAKE_CASE = key.replace('heads.cmd.clip_head.logit_scale' , 'flava.logit_scale' )
SCREAMING_SNAKE_CASE = key.replace('heads.fairseq_mlm.cls.predictions' , 'mlm_head' )
SCREAMING_SNAKE_CASE = key.replace('heads.imagenet.mim_head.cls.predictions' , 'mim_head' )
SCREAMING_SNAKE_CASE = key.replace('mm_text_projection' , 'flava.text_to_mm_projection' )
SCREAMING_SNAKE_CASE = key.replace('mm_image_projection' , 'flava.image_to_mm_projection' )
SCREAMING_SNAKE_CASE = key.replace('image_encoder.module' , 'flava.image_model' )
SCREAMING_SNAKE_CASE = key.replace('text_encoder.module' , 'flava.text_model' )
SCREAMING_SNAKE_CASE = key.replace('mm_encoder.module.encoder.cls_token' , 'flava.multimodal_model.cls_token' )
SCREAMING_SNAKE_CASE = key.replace('mm_encoder.module' , 'flava.multimodal_model' )
SCREAMING_SNAKE_CASE = key.replace('text_projection' , 'flava.text_projection' )
SCREAMING_SNAKE_CASE = key.replace('image_projection' , 'flava.image_projection' )
SCREAMING_SNAKE_CASE = value.float()
for key, value in codebook_state_dict.items():
SCREAMING_SNAKE_CASE = value
return upgrade
@torch.no_grad()
def lowercase (SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str]=None ) -> List[Any]:
if config_path is not None:
SCREAMING_SNAKE_CASE = FlavaConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
else:
SCREAMING_SNAKE_CASE = FlavaConfig()
SCREAMING_SNAKE_CASE = FlavaForPreTraining(SCREAMING_SNAKE_CASE_ ).eval()
SCREAMING_SNAKE_CASE = convert_dalle_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , save_checkpoint=SCREAMING_SNAKE_CASE_ )
if os.path.exists(SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location='cpu' )
else:
SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location='cpu' )
SCREAMING_SNAKE_CASE = upgrade_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
hf_model.load_state_dict(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = hf_model.state_dict()
SCREAMING_SNAKE_CASE = count_parameters(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = count_parameters(SCREAMING_SNAKE_CASE_ ) + count_parameters(SCREAMING_SNAKE_CASE_ )
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
parser.add_argument('''--codebook_path''', default=None, type=str, help='''Path to flava codebook checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
__UpperCamelCase = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 247 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowerCAmelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=64 , lowerCAmelCase__=2 , lowerCAmelCase__=3 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=32 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=10 , lowerCAmelCase__=0.02 , lowerCAmelCase__=[1, 16, 4, 4] , lowerCAmelCase__=None , ) -> int:
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = scope
SCREAMING_SNAKE_CASE = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
SCREAMING_SNAKE_CASE = (self.image_size // 32) ** 2
SCREAMING_SNAKE_CASE = num_patches + 1
def __A ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def __A ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=lowerCAmelCase__ , )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
SCREAMING_SNAKE_CASE = ViTHybridModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
SCREAMING_SNAKE_CASE = self.type_sequence_label_size
SCREAMING_SNAKE_CASE = ViTHybridForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __A ( self ) -> str:
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : List[str] = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
SCREAMING_SNAKE_CASE_ : List[str] = False
def __A ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE = ViTHybridModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 )
def __A ( self ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def __A ( self ) -> List[Any]:
pass
def __A ( self ) -> Tuple:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) )
def __A ( self ) -> int:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __A ( self ) -> List[str]:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __A ( self ) -> List[str]:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
def __A ( self ) -> Dict:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = _config_zero_init(lowerCAmelCase__ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(config=lowerCAmelCase__ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
SCREAMING_SNAKE_CASE = [F'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def __A ( self ) -> int:
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = ViTHybridModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def lowercase () -> Union[str, Any]:
SCREAMING_SNAKE_CASE = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __A ( self ) -> List[str]:
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __A ( self ) -> int:
SCREAMING_SNAKE_CASE = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=lowerCAmelCase__ , return_tensors='pt' ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**lowerCAmelCase__ )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = torch.tensor([-1.90_90, -0.49_93, -0.23_89] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
@slow
@require_accelerate
def __A ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
SCREAMING_SNAKE_CASE = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=lowerCAmelCase__ , return_tensors='pt' )
SCREAMING_SNAKE_CASE = model(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = outputs.logits
# model predicts one of the 1000 ImageNet classes
SCREAMING_SNAKE_CASE = logits.argmax(-1 ).item()
self.assertTrue(model.config.idalabel[predicted_class_idx] , 'tabby, tabby cat' )
| 247 | 1 |
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase = 1000 ) -> int:
'''simple docstring'''
snake_case_ ,snake_case_ = 1, 1
snake_case_ = 2
while True:
snake_case_ = 0
snake_case_ = fa + fa
snake_case_ ,snake_case_ = fa, f
index += 1
for _ in str(__UpperCAmelCase ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
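# Hedged worked check of the intended algorithm (Project Euler 25), modulo the
# dump's variable renaming: F(12) = 144 is the first three-digit Fibonacci
# term, so solution(3) should return 12.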
| 593 |
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase = 1000 ) -> int:
'''simple docstring'''
snake_case_ ,snake_case_ = 1, 1
snake_case_ = 2
while True:
snake_case_ = 0
snake_case_ = fa + fa
snake_case_ ,snake_case_ = fa, f
index += 1
for _ in str(__UpperCAmelCase ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 593 | 1 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCamelCase__ = 16
UpperCamelCase__ = 32
def lowerCamelCase ( _snake_case ,_snake_case = 16 ,_snake_case = "bert-base-cased" ):
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained(__lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = load_dataset('glue' ,'mrpc' )
def tokenize_function(_snake_case ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase__ : int = tokenizer(examples['sentence1'] ,examples['sentence2'] ,truncation=__lowerCAmelCase ,max_length=__lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCAmelCase__ : Optional[int] = datasets.map(
__lowerCAmelCase ,batched=__lowerCAmelCase ,remove_columns=['idx', 'sentence1', 'sentence2'] ,load_from_cache_file=__lowerCAmelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase__ : str = tokenized_datasets.rename_column('label' ,'labels' )
def collate_fn(_snake_case ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__lowerCAmelCase ,padding='max_length' ,max_length=128 ,return_tensors='pt' )
return tokenizer.pad(__lowerCAmelCase ,padding='longest' ,return_tensors='pt' )
# Instantiate dataloaders.
UpperCAmelCase__ : Tuple = DataLoader(
tokenized_datasets['train'] ,shuffle=__lowerCAmelCase ,collate_fn=__lowerCAmelCase ,batch_size=__lowerCAmelCase )
UpperCAmelCase__ : Any = DataLoader(
tokenized_datasets['validation'] ,shuffle=__lowerCAmelCase ,collate_fn=__lowerCAmelCase ,batch_size=__lowerCAmelCase )
return train_dataloader, eval_dataloader
def lowerCamelCase ( _snake_case ,_snake_case ,_snake_case ,_snake_case ):
model.eval()
UpperCAmelCase__ : List[str] = 0
for step, batch in enumerate(__lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(**__lowerCAmelCase )
UpperCAmelCase__ : Any = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
UpperCAmelCase__ : List[str] = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__lowerCAmelCase ) - 1:
UpperCAmelCase__ : Tuple = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCAmelCase__ : Optional[Any] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__lowerCAmelCase ,references=__lowerCAmelCase ,)
UpperCAmelCase__ : Union[str, Any] = metric.compute()
return eval_metric["accuracy"]
def lowerCamelCase ( _snake_case ,_snake_case ):
# Initialize accelerator
UpperCAmelCase__ : Any = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase__ : Optional[int] = config["""lr"""]
UpperCAmelCase__ : str = int(config['num_epochs'] )
UpperCAmelCase__ : int = int(config['seed'] )
UpperCAmelCase__ : Optional[Any] = int(config['batch_size'] )
UpperCAmelCase__ : Any = args.model_name_or_path
set_seed(__lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = get_dataloaders(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase__ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(__lowerCAmelCase ,return_dict=__lowerCAmelCase )
# Instantiate optimizer
UpperCAmelCase__ : Union[str, Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCAmelCase__ : Any = optimizer_cls(params=model.parameters() ,lr=__lowerCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
UpperCAmelCase__ : List[str] = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
UpperCAmelCase__ : Any = 1
UpperCAmelCase__ : List[Any] = (len(__lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCAmelCase__ : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=__lowerCAmelCase ,num_warmup_steps=0 ,num_training_steps=__lowerCAmelCase ,)
else:
UpperCAmelCase__ : Optional[Any] = DummyScheduler(__lowerCAmelCase ,total_num_steps=__lowerCAmelCase ,warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase__ : Dict = accelerator.prepare(
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase__ : Optional[int] = 0
# We also need to keep track of the stating epoch so files are named properly
UpperCAmelCase__ : List[str] = 0
UpperCAmelCase__ : Union[str, Any] = evaluate.load('glue' ,'mrpc' )
UpperCAmelCase__ : Dict = num_epochs
if args.partial_train_epoch is not None:
UpperCAmelCase__ : List[str] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
UpperCAmelCase__ : List[Any] = args.resume_from_checkpoint.split('epoch_' )[1]
UpperCAmelCase__ : Tuple = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
UpperCAmelCase__ : Optional[int] = int(__lowerCAmelCase ) + 1
UpperCAmelCase__ : Union[str, Any] = evaluation_loop(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
accelerator.print('resumed checkpoint performance:' ,__lowerCAmelCase )
accelerator.print('resumed checkpoint\'s scheduler\'s lr:' ,lr_scheduler.get_lr()[0] )
accelerator.print('resumed optimizers\'s lr:' ,optimizer.param_groups[0]['lr'] )
with open(os.path.join(args.output_dir ,F'''state_{starting_epoch-1}.json''' ) ,'r' ) as f:
UpperCAmelCase__ : str = json.load(__lowerCAmelCase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
UpperCAmelCase__ : Tuple = {}
for epoch in range(__lowerCAmelCase ,__lowerCAmelCase ):
model.train()
for step, batch in enumerate(__lowerCAmelCase ):
UpperCAmelCase__ : Optional[int] = model(**__lowerCAmelCase )
UpperCAmelCase__ : Tuple = outputs.loss
UpperCAmelCase__ : Optional[Any] = loss / gradient_accumulation_steps
accelerator.backward(__lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
UpperCAmelCase__ : Any = F'''epoch_{epoch}'''
UpperCAmelCase__ : Optional[int] = os.path.join(args.output_dir ,__lowerCAmelCase )
accelerator.save_state(__lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = evaluation_loop(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
UpperCAmelCase__ : int = accuracy
UpperCAmelCase__ : Union[str, Any] = lr_scheduler.get_lr()[0]
UpperCAmelCase__ : List[str] = optimizer.param_groups[0]["""lr"""]
UpperCAmelCase__ : Optional[Any] = epoch
UpperCAmelCase__ : Optional[Any] = overall_step
accelerator.print(F'''epoch {epoch}:''' ,__lowerCAmelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir ,F'''state_{epoch}.json''' ) ,'w' ) as f:
json.dump(__lowerCAmelCase ,__lowerCAmelCase )
def lowerCamelCase ( ):
UpperCAmelCase__ : Dict = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' ,type=__lowerCAmelCase ,default='bert-base-cased' ,help='Path to pretrained model or model identifier from huggingface.co/models.' ,required=__lowerCAmelCase ,)
parser.add_argument(
'--output_dir' ,type=__lowerCAmelCase ,default='.' ,help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' ,)
parser.add_argument(
'--resume_from_checkpoint' ,type=__lowerCAmelCase ,default=__lowerCAmelCase ,help='If the training should continue from a checkpoint folder.' ,)
parser.add_argument(
'--partial_train_epoch' ,type=__lowerCAmelCase ,default=__lowerCAmelCase ,help='If passed, the training will stop after this number of epochs.' ,)
parser.add_argument(
'--num_epochs' ,type=__lowerCAmelCase ,default=2 ,help='Number of train epochs.' ,)
UpperCAmelCase__ : List[Any] = parser.parse_args()
UpperCAmelCase__ : List[Any] = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(__lowerCAmelCase ,__lowerCAmelCase )
if __name__ == "__main__":
main()
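# Hedged mini-check of the resume parsing above: 'epoch_3'.split('epoch_')[1]
# is '3', whose digits form state_epoch_num, so training resumes at 3 + 1.
assert "epoch_3".split("epoch_")[1] == "3"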
| 110 |
"""simple docstring"""
import math
import os
import sys
def _lowercase ( __lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """"""
try:
with open(__lowerCAmelCase , """rb""" ) as binary_file:
SCREAMING_SNAKE_CASE__ : Optional[int] = binary_file.read()
for dat in data:
SCREAMING_SNAKE_CASE__ : Dict = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print("""File not accessible""" )
sys.exit()
def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]
def compress_data(data_bits: str) -> str:
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
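# Informal sketch of compress_data above: this is a binary LZW variant -- whenever the
# growing pattern `curr_string` is found in the lexicon its code is emitted, and
# add_key_to_lexicon swaps the pattern for its two one-bit extensions, so the same prefix
# must grow one bit longer before it can be emitted again; prefixing every stored code
# with "0" at each power-of-two index keeps all live codes the same width.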
def add_file_length(source_path: str, compressed: str) -> str:
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed
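# Example: for a 5-byte source file, bin(5)[2:] == "101" (3 bits), so the prefix is
# "00" + "101" -- (length_length - 1) zeros followed by the length bits -- letting a
# decoder recover the original file length by counting the leading zeros.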
def write_file_binary(file_path: str, to_write: str) -> None:
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 680 | 0 |
'''simple docstring'''
from __future__ import annotations
def print_distance(distance: list[float], src):
    """simple docstring"""
    print(f'''Vertex\tShortest Distance from vertex {src}''')
    for i, d in enumerate(distance):
        print(f'''{i}\t\t{d}''')
def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    """simple docstring"""
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ['src', 'dst', 'weight'])
        if distance[u] != float('inf') and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """simple docstring"""
    distance = [float('inf')] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ['src', 'dst', 'weight'])
            if distance[u] != float('inf') and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception('Negative cycle found')
    return distance
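# Minimal usage sketch (hypothetical data, not part of the original module):
# graph = [
#     {"src": 0, "dst": 1, "weight": 2},
#     {"src": 1, "dst": 2, "weight": 3},
# ]
# bellman_ford(graph, vertex_count=3, edge_count=2, src=0)  # -> [0.0, 2.0, 5.0]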
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())
    graph: list[dict[str, int]] = [{} for _ in range(E)]
    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}
    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 719 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class __A ( PipelineTool ):
'''simple docstring'''
    default_checkpoint = 'facebook/nllb-200-distilled-600M'
    description = (
        'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
        'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
        'which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in '
        'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
    )
    name = 'translator'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ['text', 'text', 'text']
    outputs = ['text']
    def encode(self , text , src_lang , tgt_lang ) -> List[str]:
"""simple docstring"""
if src_lang not in self.lang_to_code:
raise ValueError(f'''{src_lang} is not a supported language.''' )
if tgt_lang not in self.lang_to_code:
raise ValueError(f'''{tgt_lang} is not a supported language.''' )
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text , return_tensors='pt' , src_lang=src_lang , tgt_lang=tgt_lang )
    def forward(self , inputs ) -> Optional[Any]:
        """simple docstring"""
        return self.model.generate(**inputs )
    def decode(self , outputs ) -> List[str]:
        """simple docstring"""
        return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=True )
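# Hypothetical usage sketch (assumes the PipelineTool base wires encode/forward/decode
# together through __call__ and lazily loads default_checkpoint; names are illustrative):
# translator = __A()
# translator("Bonjour, comment vas-tu ?", src_lang="French", tgt_lang="English")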
| 352 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
class SegmentTree:
    '''simple docstring'''

    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Lazily assign val to every element in [a, b] (1-indexed)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum over [a, b] (1-indexed)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
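# Layout note (illustrative): the tree is 1-indexed, so node idx has children 2 * idx and
# 2 * idx + 1; e.g. node 3 covers one half of the array and its children are nodes 6 and 7.
# The lazy flags defer pending range assignments, which is what makes both the
# range-assign update() and the range-max query() above O(log size).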
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
    print(segt)
| 293 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
mock_tokenizer_config = {'target_lang': 'fi', 'source_lang': 'en'}
zh_code = '>>zh<<'
ORG_NAME = 'Helsinki-NLP/'
if is_torch_available():
    FRAMEWORK = 'pt'
elif is_tf_available():
    FRAMEWORK = 'tf'
else:
    FRAMEWORK = 'jax'
@require_sentencepiece
class A__ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def _SCREAMING_SNAKE_CASE ( self: int) -> Tuple:
"""simple docstring"""
super().setUp()
__lowerCAmelCase : Dict = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
__lowerCAmelCase : Union[str, Any] = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE))))
__lowerCAmelCase : int = Path(self.tmpdirname)
save_json(_SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES["vocab"])
save_json(_SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(_SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES["source_spm"])
copyfile(_SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES["target_spm"])
__lowerCAmelCase : Tuple = MarianTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self: Dict , **_SCREAMING_SNAKE_CASE: List[str]) -> MarianTokenizer:
"""simple docstring"""
return MarianTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: List[str]) -> Dict:
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> int:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = "</s>"
__lowerCAmelCase : int = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE) , _SCREAMING_SNAKE_CASE)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE) , _SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : List[str] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , "</s>")
self.assertEqual(vocab_keys[1] , "<unk>")
self.assertEqual(vocab_keys[-1] , "<pad>")
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , 9)
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Dict:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 9)
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Any:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = MarianTokenizer.from_pretrained(F"""{ORG_NAME}opus-mt-en-de""")
__lowerCAmelCase : int = en_de_tokenizer(["I am a small frog"] , return_tensors=_SCREAMING_SNAKE_CASE)
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = [38, 121, 14, 697, 3_8848, 0]
self.assertListEqual(_SCREAMING_SNAKE_CASE , batch.input_ids[0])
__lowerCAmelCase : Tuple = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = [x.name for x in Path(_SCREAMING_SNAKE_CASE).glob("*")]
self.assertIn("source.spm" , _SCREAMING_SNAKE_CASE)
MarianTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.get_tokenizer()
__lowerCAmelCase : int = tok(
["I am a small frog" * 1000, "I am a small frog"] , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE)
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
self.assertEqual(batch.input_ids.shape , (2, 512))
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Any = self.get_tokenizer()
__lowerCAmelCase : str = tok(["I am a tiny frog", "I am a small frog"] , padding=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE)
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
self.assertEqual(batch_smaller.input_ids.shape , (2, 10))
@slow
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = {"input_ids": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , )
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Any:
"""simple docstring"""
__lowerCAmelCase : List[Any] = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")
__lowerCAmelCase : List[str] = "Tämä on testi"
__lowerCAmelCase : int = "This is a test"
__lowerCAmelCase : Union[str, Any] = [76, 7, 2047, 2]
__lowerCAmelCase : Dict = [69, 12, 11, 940, 2]
__lowerCAmelCase : List[str] = tokenizer(_SCREAMING_SNAKE_CASE).input_ids
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = tokenizer(text_target=_SCREAMING_SNAKE_CASE).input_ids
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = tokenizer.decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE)
        self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
| 293 | 1 |
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( OnnxPipelineTesterMixin, unittest.TestCase ):
'''simple docstring'''
    hub_checkpoint = '''ssube/stable-diffusion-x4-upscaler-onnx'''
def __UpperCAmelCase ( self : List[Any] , lowercase_ : Any=0) -> Any:
"""simple docstring"""
_UpperCamelCase = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowercase_))
_UpperCamelCase = torch.manual_seed(lowercase_)
_UpperCamelCase = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def __UpperCAmelCase ( self : str) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = pipe(**lowercase_).images
_UpperCamelCase = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
_UpperCamelCase = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23])
assert np.abs(image_slice - expected_slice).max() < 1e-1
def __UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
_UpperCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
_UpperCamelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = pipe(**lowercase_).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCamelCase = np.array(
[0.6_89_88_92, 0.59_24_05_56, 0.52_49_95_27, 0.58_86_62_15, 0.52_25_82_35, 0.52_57_27_15, 0.62_41_44_73, 0.6_17_43_87, 0.6_21_49_64])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def __UpperCAmelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
_UpperCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
_UpperCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = pipe(**lowercase_).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCamelCase = np.array(
[0.7_65_92_78, 0.76_43_76_64, 0.75_57_91_07, 0.7_69_11_16, 0.77_66_69_86, 0.7_72_76_72, 0.7_75_86_64, 0.7_81_22_26, 0.76_94_25_15])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def __UpperCAmelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
_UpperCamelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = pipe(**lowercase_).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCamelCase = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def __UpperCAmelCase ( self : Any) -> str:
"""simple docstring"""
_UpperCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
_UpperCamelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = pipe(**lowercase_).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCamelCase = np.array(
[0.77_42_44_96, 0.77_36_01, 0.7_64_52_88, 0.7_76_95_98, 0.7_77_27_39, 0.7_73_86_88, 0.78_18_72_33, 0.77_87_95_84, 0.76_70_43])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
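        # The scheduler-variant checks above all run the same 128 -> 512 (x4) upscale;
        # only the expected slices differ, since each scheduler integrates the reverse
        # diffusion process along a slightly different trajectory.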
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
_UpperCamelCase = ort.SessionOptions()
_UpperCamelCase = False
return options
def __UpperCAmelCase ( self : Dict) -> List[str]:
"""simple docstring"""
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg")
_UpperCamelCase = init_image.resize((128, 128))
# using the PNDM scheduler by default
_UpperCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = "A fantasy landscape, trending on artstation"
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(
prompt=lowercase_ , image=lowercase_ , guidance_scale=7.5 , num_inference_steps=10 , generator=lowercase_ , output_type="np" , )
_UpperCamelCase = output.images
_UpperCamelCase = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
_UpperCamelCase = np.array([0.48_83, 0.49_47, 0.49_80, 0.49_75, 0.49_82, 0.49_80, 0.50_00, 0.50_06, 0.49_72])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
def __UpperCAmelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg")
_UpperCamelCase = init_image.resize((128, 128))
_UpperCamelCase = LMSDiscreteScheduler.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , subfolder="scheduler")
_UpperCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , scheduler=lowercase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = "A fantasy landscape, trending on artstation"
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(
prompt=lowercase_ , image=lowercase_ , guidance_scale=7.5 , num_inference_steps=20 , generator=lowercase_ , output_type="np" , )
_UpperCamelCase = output.images
_UpperCamelCase = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
_UpperCamelCase = np.array(
[0.50_17_37_53, 0.50_22_33_56, 0.50_20_39, 0.50_23_30_36, 0.5_02_37_25, 0.5_02_26_01, 0.5_01_87_58, 0.50_23_40_85, 0.50_24_15_66])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 82 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    '''simple docstring'''
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1 )
    os.makedirs(f'{class_data_dir}/images', exist_ok=True )
    if len(list(Path(f'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt )
        if len(class_images ) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images )
            client = ClipClient(
                url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1, )
    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images )
    with open(f'{class_data_dir}/caption.txt', "w" ) as f1, open(f'{class_data_dir}/urls.txt', "w" ) as f2, open(
        f'{class_data_dir}/images.txt', "w" ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"] )
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content ) )
                    with open(f'{class_data_dir}/images/{total}.jpg', "wb" ) as f:
                        f.write(img.content )
                    f1.write(images["caption"] + "\n" )
                    f2.write(images["url"] + "\n" )
                    f3.write(f'{class_data_dir}/images/{total}.jpg' + "\n" )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            except Exception:
                continue
    return
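# Retry sketch for the loop above: the candidate pool grows geometrically
# (num_images = int(factor * num_images) each round, factor = 1.5) until it covers
# factor * num_class_images results or exceeds the 1e4 cap, so only on the order of
# log(target / initial) ClipClient re-instantiations are ever needed.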
def parse_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser("", add_help=False )
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str )
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str )
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int )
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 82 | 1 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self : Tuple , snake_case__ : Optional[int] , snake_case__ : Union[str, Any]=13 , snake_case__ : Any=30 , snake_case__ : int=2 , snake_case__ : Union[str, Any]=3 , snake_case__ : int=True , snake_case__ : Tuple=True , snake_case__ : List[Any]=32 , snake_case__ : str=5 , snake_case__ : Optional[int]=4 , snake_case__ : List[str]=37 , snake_case__ : str="gelu" , snake_case__ : List[str]=0.1 , snake_case__ : int=0.1 , snake_case__ : List[str]=10 , snake_case__ : Any=0.02 , snake_case__ : Union[str, Any]=3 , snake_case__ : Dict=0.6 , snake_case__ : Any=None , ):
'''simple docstring'''
UpperCAmelCase__ : str = parent
UpperCAmelCase__ : Optional[int] = batch_size
UpperCAmelCase__ : Optional[int] = image_size
UpperCAmelCase__ : Tuple = patch_size
UpperCAmelCase__ : Dict = num_channels
UpperCAmelCase__ : List[Any] = is_training
UpperCAmelCase__ : str = use_labels
UpperCAmelCase__ : Optional[int] = hidden_size
UpperCAmelCase__ : Optional[int] = num_hidden_layers
UpperCAmelCase__ : Optional[Any] = num_attention_heads
UpperCAmelCase__ : List[Any] = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : str = hidden_dropout_prob
UpperCAmelCase__ : Dict = attention_probs_dropout_prob
UpperCAmelCase__ : Dict = type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : Dict = mask_ratio
UpperCAmelCase__ : Optional[Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCAmelCase__ : Union[str, Any] = (image_size // patch_size) ** 2
UpperCAmelCase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
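        # Worked example with the defaults above: image_size=30 and patch_size=2 give
        # num_patches = (30 // 2) ** 2 = 225, and mask_ratio=0.6 gives an expected
        # sequence length of ceil(0.4 * (225 + 1)) = ceil(90.4) = 91 visible tokens.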
def UpperCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : int = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self : List[str] ):
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def UpperCamelCase ( self : Dict , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Any = ViTMAEModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase__ : Any = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : str = ViTMAEForPreTraining(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase__ : Dict = model(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : List[Any] = (self.image_size // self.patch_size) ** 2
UpperCAmelCase__ : Dict = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCAmelCase__ : str = 1
UpperCAmelCase__ : Optional[Any] = ViTMAEForPreTraining(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : Dict = model(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : Optional[Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def UpperCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase__ : Any = config_and_inputs
UpperCAmelCase__ : List[str] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
def UpperCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = ViTMAEModelTester(self )
UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def UpperCamelCase ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
pass
def UpperCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Tuple = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def UpperCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Any = model_class(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Tuple = [*signature.parameters.keys()]
UpperCAmelCase__ : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : int , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Tuple ):
'''simple docstring'''
np.random.seed(2 )
UpperCAmelCase__ : List[Any] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCAmelCase__ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase__ : Any = torch.from_numpy(_SCREAMING_SNAKE_CASE )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCAmelCase__ : Union[str, Any] = pt_noise
super().check_pt_tf_models(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[str] = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : Dict = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
UpperCAmelCase__ : List[str] = outputs[0].cpu().numpy()
UpperCAmelCase__ : str = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : Any = model_class.from_pretrained(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : int = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
# Make sure we don't have nans
UpperCAmelCase__ : Tuple = after_outputs[0].cpu().numpy()
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Tuple = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def UpperCamelCase ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def UpperCamelCase ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def UpperCamelCase ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def UpperCamelCase ( self : Tuple ):
'''simple docstring'''
pass
@slow
def UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Tuple = ViTMAEModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def snake_case_ ( ):
UpperCAmelCase__ : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
np.random.seed(2 )
UpperCAmelCase__ : List[str] = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : int = self.default_image_processor
UpperCAmelCase__ : str = prepare_img()
UpperCAmelCase__ : Union[str, Any] = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(_SCREAMING_SNAKE_CASE )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCAmelCase__ : Any = ViTMAEConfig()
UpperCAmelCase__ : Any = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCAmelCase__ : Optional[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : int = model(**_SCREAMING_SNAKE_CASE , noise=torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE ) )
# verify the logits
UpperCAmelCase__ : Tuple = torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(_SCREAMING_SNAKE_CASE ) , atol=1e-4 ) )
| 199 |
__a = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def base64_encode(data: bytes) -> bytes:
    '''simple docstring'''
    if not isinstance(data, bytes):
        msg = f'''a bytes-like object is required, not '{data.__class__.__name__}\''''
        raise TypeError(msg)
    binary_stream = ''.join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b'=' * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += '0' * (6 - len(binary_stream) % 6)
    else:
        padding = b''
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        ''.join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
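# Worked example: base64_encode(b"Hi") -> bits "01001000" + "01101001" (16 bits).
# 16 % 6 == 4, so padding is b"=" * ((6 - 4) // 2) == b"=" and two "0" bits are
# appended, giving chunks 010010 | 000110 | 100100 -> "S", "G", "k" -> b"SGk=".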
def base64_decode(encoded_data: str) -> bytes:
    '''simple docstring'''
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            'argument should be a bytes-like object or ASCII string, '
            f'''not '{encoded_data.__class__.__name__}\''''
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode('utf-8')
        except UnicodeDecodeError:
            raise ValueError('base64 encoded data should only contain ASCII characters')
    padding = encoded_data.count('=')
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = ''.join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = ''.join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )
    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(data)
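# Round trip: base64_decode(base64_encode(b"Hi")) == b"Hi" -- each "=" accounts for two
# spare bits, which is why the decoder slices off padding * 2 bits before regrouping
# the stream into 8-bit bytes.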
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 30 | 0 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    '''simple docstring'''
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None )
def make_linear_from_emb(emb):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
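# Note on the helper above: assigning emb.weight.data to lin_layer.weight.data makes the
# returned head share storage with the embedding matrix (the usual weight tying);
# functionally it maps hidden states to vocab_size logits, whatever in/out sizes the
# nn.Linear was declared with, because the .data assignment replaces the weight tensor.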
def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False ):
    '''simple docstring'''
    state_dict = torch.load(checkpoint_path, map_location="cpu" )["model"]
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size )
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config )
    model.model.load_state_dict(state_dict )
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
| 426 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
def __UpperCAmelCase ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase):
lowerCamelCase__ = ZeroShotClassificationPipeline(
model=UpperCamelCase , tokenizer=UpperCamelCase , candidate_labels=["polics", "health"])
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
        )
    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="tf",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=UpperCamelCase , )
        self.assertEqual(
            nested_simplify(outputs),
            {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=UpperCamelCase , )
        self.assertEqual(
            nested_simplify(outputs),
            {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
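# A minimal usage sketch of what the tests above exercise (illustrative only; the
# checkpoint "facebook/bart-large-mnli" is an assumption, not taken from this file):
#
# from transformers import pipeline
#
# classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
# result = classifier(
#     "Who are you voting for in 2020?",
#     candidate_labels=["politics", "public health", "science"],
# )
# print(result["labels"][0], result["scores"][0])  # best label and its score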
| 426 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here
    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])
    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
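# A minimal sketch of how disjunctive constraints are consumed in practice (an
# assumption for illustration; the tests above drive DisjunctiveConstraint directly).
# A nested list passed to `force_words_ids` makes `generate` include one phrase per group:
#
# from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
#
# tokenizer = AutoTokenizer.from_pretrained("t5-small")
# model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
# phrase_ids = tokenizer(["screamed", "shouted"], add_special_tokens=False).input_ids
# prompt = tokenizer("translate English to German: the man yelled", return_tensors="pt")
# outputs = model.generate(prompt.input_ids, force_words_ids=[phrase_ids], num_beams=4)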
| 84 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
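# A minimal usage sketch for the config above (illustrative, not part of the original file):
#
# config = ViTMAEConfig(mask_ratio=0.9)  # mask 90% of patches instead of the default 75%
# assert config.model_type == "vit_mae"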
| 7 | 0 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
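# A minimal ancestral-sampling sketch with the scheduler under test (illustrative; the
# shapes and the UNet checkpoint "google/ddpm-cat-256" are assumptions, not from this file):
#
# import torch
# from diffusers import DDPMScheduler, UNet2DModel
#
# unet = UNet2DModel.from_pretrained("google/ddpm-cat-256")
# scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256")
# scheduler.set_timesteps(50)
# sample = torch.randn(1, 3, 256, 256)
# for t in scheduler.timesteps:
#     with torch.no_grad():
#         residual = unet(sample, t).sample
#     sample = scheduler.step(residual, t, sample).prev_sample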
| 710 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 643 | 0 |
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd()))  # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip


logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],  # BART likes to repeat BOS tokens, don't allow it to generate more than one
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name",
        default=None,
        choices=["exact", "compressed", "legacy"],
        type=str,
        help="RAG model retriever type",
    )
    parser.add_argument("--index_path", default=None, type=str, help="Path to the retrieval index")
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples"
    )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples"
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file. "
            "qa - a single line in the following format: question [tab] answer_list. "
            "ans - a single line of the gold file contains the expected answer string."
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name and ending with step number",
    )
    parser.add_argument("--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument(
        "--recalculate",
        help="Recalculate predictions even if the prediction file exists",
        action="store_true",
    )
    parser.add_argument("--num_beams", default=4, type=int, help="Number of beams to be used when generating answers")
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating."
    )
    parser.add_argument("--print_docs", action="store_true", help="If True, prints docs retrieved while generating.")
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
A: Optional[int] = get_args()
main(args)
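# Example invocation (illustrative paths; the checkpoint "facebook/rag-token-nq" is an
# assumption for illustration, not taken from this file):
#
# python eval_rag.py \
#     --model_name_or_path facebook/rag-token-nq \
#     --model_type rag_token \
#     --evaluation_set path/to/test.source \
#     --gold_data_path path/to/gold_data \
#     --predictions_path predictions.txt \
#     --eval_mode e2e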
| 160 |
def max_product_subarray(numbers: list[int]) -> int:
    """
    Returns the maximum product that can be obtained by multiplying a
    contiguous subarray of the given integer list `numbers`.

    >>> max_product_subarray([2, 3, -2, 4])
    6
    >>> max_product_subarray((-2, 0, -1))
    0
    """
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod | 518 | 0 |
"""simple docstring"""
from math import pow, sqrt
def UpperCamelCase ( *SCREAMING_SNAKE_CASE_ ) ->bool:
_lowerCamelCase : List[Any] = len(SCREAMING_SNAKE_CASE_ ) > 0 and all(value > 0.0 for value in values )
return result
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->float | ValueError:
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else ValueError('''Input Error: Molar mass values must greater than 0.''' )
)
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->float | ValueError:
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else ValueError(
'''Input Error: Molar mass and effusion rate values must greater than 0.''' )
)
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->float | ValueError:
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else ValueError(
'''Input Error: Molar mass and effusion rate values must greater than 0.''' )
)
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->float | ValueError:
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else ValueError(
'''Input Error: Molar mass and effusion rate values must greater than 0.''' )
)
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->float | ValueError:
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else ValueError(
'''Input Error: Molar mass and effusion rate values must greater than 0.''' )
)
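# A quick sanity check of the helpers above (illustrative values, with the
# `effusion_ratio` argument order assumed as reconstructed here): hydrogen
# (M ~ 2.016 g/mol) effuses roughly four times faster than oxygen (M ~ 32.00 g/mol).
if __name__ == "__main__":
    print(effusion_ratio(2.016, 32.00))  # ~3.984, i.e. rate_1 / rate_2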
| 558 | """simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 558 | 1 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."


# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Applies the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrites the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)

    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(REPO_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
lowerCAmelCase_ = parser.parse_args()
check_copies(args.fix_and_overwrite)
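# Example of the convention this script enforces (illustrative, not from this file):
# a function that duplicates code elsewhere in the repo carries a comment such as
#
#     # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.scale_model_input with DDPM->DDIM
#
# and `is_copy_consistent` re-derives the body from the referenced object (applying the
# `DDPM->DDIM` renames) and diffs it against what is actually written in the file.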
| 60 |
def reverse_words(input_str: str) -> str:
    """
    Reverses words in a given string.

    >>> reverse_words("I love Python")
    'Python love I'
    >>> reverse_words("I     Love          Python")
    'Python Love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 253 | 0 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 707 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
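# A minimal usage sketch (the repo and file path are assumptions for illustration):
#
# url = hf_hub_url("user/my_dataset", "data/train-00000-of-00001.parquet", revision="main")
# # On huggingface_hub < 0.11.0 the path is percent-encoded here first; newer
# # versions handle the encoding themselves.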
| 407 | 0 |
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
| 563 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 36 | 0 |
"""Project Euler problem 12: find the first triangle number with more than 500 divisors."""


def count_divisors(n: int) -> int:
    n_divisors = 1
    i = 2

    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1

    if n > 1:
        n_divisors *= 2

    return n_divisors
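# Worked example for count_divisors: 28 = 2**2 * 7, so the divisor count is
# (2 + 1) * (1 + 1) = 6, matching {1, 2, 4, 7, 14, 28}. The final `n > 1` check
# accounts for one leftover prime factor (the 7 here) after trial division stops.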
def solution() -> int:
    t_num = 1
    i = 1

    while True:
        i += 1
        t_num += i

        if count_divisors(t_num) > 500:
            break

    return t_num


if __name__ == "__main__":
    print(solution())
| 717 | '''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(__a )
class __UpperCAmelCase ( __a ):
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
requires_backends(self , '''decord''' )
self.check_model_type(_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None ):
lowerCAmelCase_ = {}
if frame_sampling_rate is not None:
lowerCAmelCase_ = frame_sampling_rate
if num_frames is not None:
lowerCAmelCase_ = num_frames
lowerCAmelCase_ = {}
if top_k is not None:
lowerCAmelCase_ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , _lowerCamelCase , **_lowerCamelCase ):
return super().__call__(_lowerCamelCase , **_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=1 ):
if num_frames is None:
lowerCAmelCase_ = self.model.config.num_frames
if video.startswith('''http://''' ) or video.startswith('''https://''' ):
lowerCAmelCase_ = BytesIO(requests.get(_lowerCamelCase ).content )
lowerCAmelCase_ = VideoReader(_lowerCamelCase )
videoreader.seek(0 )
lowerCAmelCase_ = 0
lowerCAmelCase_ = num_frames * frame_sampling_rate - 1
lowerCAmelCase_ = np.linspace(_lowerCamelCase , _lowerCamelCase , num=_lowerCamelCase , dtype=np.intaa )
lowerCAmelCase_ = videoreader.get_batch(_lowerCamelCase ).asnumpy()
lowerCAmelCase_ = list(_lowerCamelCase )
lowerCAmelCase_ = self.image_processor(_lowerCamelCase , return_tensors=self.framework )
return model_inputs
def UpperCAmelCase_ ( self , _lowerCamelCase ):
lowerCAmelCase_ = self.model(**_lowerCamelCase )
return model_outputs
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase=5 ):
if top_k > self.model.config.num_labels:
lowerCAmelCase_ = self.model.config.num_labels
if self.framework == "pt":
lowerCAmelCase_ = model_outputs.logits.softmax(-1 )[0]
lowerCAmelCase_ ,lowerCAmelCase_ = probs.topk(_lowerCamelCase )
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
lowerCAmelCase_ = scores.tolist()
lowerCAmelCase_ = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_lowerCamelCase , _lowerCamelCase )]
| 606 | 0 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
    # See all DETR models at https://huggingface.co/models?filter=detr
}


class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('pixel_mask', {0: 'batch'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1E-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
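
# Usage sketch (illustrative, not part of the original module): the
# `attribute_map` defined above redirects common attribute names to the
# DETR-specific ones.
#
#     config = DetrConfig(num_queries=50, d_model=128)
#     assert config.hidden_size == config.d_model
#     assert config.num_attention_heads == config.encoder_attention_heads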
| 350 |
'''simple docstring'''
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """simple docstring"""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_00_00_00) -> int:
    """simple docstring"""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
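
# Worked example from the Project Euler 50 statement (added as a hedged check,
# not part of the original file): below 100 the longest run is
# 2 + 3 + 5 + 7 + 11 + 13 = 41 (six consecutive primes), and below 1000 it is
# 953, a sum of 21 consecutive primes.
def _check_known_ceilings() -> None:
    assert solution(100) == 41
    assert solution(1_000) == 953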
if __name__ == "__main__":
print(F"""{solution() = }""")
| 350 | 1 |
"""simple docstring"""
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by the magnitude of its first term --> creates a 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
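
# Optional cross-check (assumes numpy is installed; not part of the original
# file): the same 5x5 system solved with numpy.linalg.solve should agree with
# solve_simultaneous up to the 5-decimal rounding applied above, i.e.
# [-1.0, 0.0, 1.0, 2.0, 3.0] for `eq`.
#
#     import numpy as np
#
#     a = np.array([row[:-1] for row in eq], dtype=float)
#     b = np.array([row[-1] for row in eq], dtype=float)
#     print(np.linalg.solve(a, b))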
| 704 |
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f'''{split}.source''', data_dir / f'''{split}.target'''
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f'''packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.''')
        Path(save_path / f'''{split}.source''').open("w").write("\n".join(packed_src))
        Path(save_path / f'''{split}.target''').open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f'''{split}.source''', data_dir / f'''{split}.target'''
        shutil.copyfile(src_path, save_path / f'''{split}.source''')
        shutil.copyfile(tgt_path, save_path / f'''{split}.target''')


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
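
# Example invocation (hedged; paths and the tokenizer name are illustrative):
#
#     python pack_dataset.py \
#         --tok_name facebook/bart-large-cnn \
#         --max_seq_len 512 \
#         --data_dir ./cnn_dm \
#         --save_path ./cnn_dm_packed
#
# Only the train split is greedily packed so that concatenated source/target
# pairs stay under --max_seq_len tokens; val and test are copied through as-is.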
| 295 | 0 |
"""simple docstring"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""")
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
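
# Shape intuition for the qkv split in convert_state_dict above (hedged,
# illustrative numbers only): a fused qkv weight of shape (3 * dim, dim) is
# sliced row-wise into three (dim, dim) projection matrices.
def _demo_qkv_split(dim: int = 4) -> None:
    qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)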
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f'Model name not supported. Should be one of {model_names}')

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1E-4)
    else:
        print("""Logits:""", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1E-4)
    print("""Logits ok!""")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1E-4)
        print("""Loss ok!""")

    if pytorch_dump_folder_path is not None:
        print(f'Saving model and image processor to {pytorch_dump_folder_path}')
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("""Pushing to the hub...""")
        model.push_to_hub(model_name, organization="""nielsr""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
        type=str,
        help=(
            'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
            ' download link.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='/Users/nielsrogge/Documents/VideoMAE/Test',
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )

    args = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
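
# Example invocation (hedged; the URL and output path are illustrative
# placeholders, not values taken from this file):
#
#     python convert_videomae_to_pytorch.py \
#         --checkpoint_url "<google-drive-direct-download-link>" \
#         --pytorch_dump_folder_path ./videomae-base \
#         --model_name videomae-base \
#         --push_to_hub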
| 96 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
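
# Usage sketch for the pipeline defined below (hedged; assumes an unconditional
# UNet2DModel trained for Karras et al. (2022) sampling -- no specific
# checkpoint id is implied by this file):
#
#     pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
#     image = pipe(batch_size=1, num_inference_steps=50).images[0]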
class KarrasVePipeline(DiffusionPipeline):
    '''simple docstring'''

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        """simple docstring"""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output['''derivative'''],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 322 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_A = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
"SwinBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSwinForImageClassification",
"TFSwinForMaskedImageModeling",
"TFSwinModel",
"TFSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
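
# Behaviour sketch (illustrative, not part of the original module): with
# _LazyModule, importing the package itself is cheap; the heavy torch/TF
# submodules are only imported when one of the exported names is first used.
#
#     from transformers.models.swin import SwinConfig   # no torch import yet
#     from transformers.models.swin import SwinModel    # triggers modeling_swin (and torch)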
| 279 |
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """simple docstring"""
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
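
# Example behaviour (illustrative):
#     capitalize("hello world")  # -> "Hello world"
#     capitalize("123 hello")    # -> "123 hello"  (non-lowercase first character is kept)
#     capitalize("")             # -> ""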
if __name__ == "__main__":
from doctest import testmod
testmod()
| 279 | 1 |